// JSAT-master/JSAT/src/jsat/datatransform/PCA.java
package jsat.datatransform;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseMatrix;
import jsat.linear.DenseVector;
import jsat.linear.Matrix;
import jsat.linear.Vec;
/**
 * Principal Component Analysis is a method that attempts to create a basis of
* the given space that maintains the variance in the data set while eliminating
* correlation of the variables.
* <br>
* When a full basis is formed, the dimensionality will remain the same,
* but the data will be transformed to a new space. <br>
 * PCA is particularly useful when a small number of basis vectors can explain
* most of the variance in the data set that is not related to noise,
* maintaining information while reducing the dimensionality of the data.
* <br><br>
* PCA works only on the numerical attributes of a data set. <br>
* For PCA to work correctly, a {@link ZeroMeanTransform} should
* be applied to the data set first. If not done, the first
* dimension of PCA may contain noise and become uninformative,
* possibly throwing off the computation of the other PCs
*
* @author Edward Raff
* @see ZeroMeanTransform
*/
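/* Usage sketch (illustrative, not part of the original source; it assumes JSAT's
 * DataSet#applyTransform(DataTransform) method together with the constructors defined below):
 *
 *   DataSet data = ...;                                  //any data set with numeric features
 *   data.applyTransform(new ZeroMeanTransform(data));    //center the data first, as noted above
 *   PCA pca = new PCA(data, 10, 1e-4);                   //learn at most 10 principal components
 *   data.applyTransform(pca);                            //project every point onto the learned PCs
 */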
public class PCA implements DataTransform
{
private static final long serialVersionUID = 8736609877239941617L;
/**
* The transposed matrix of the Principal Components
*/
private Matrix P;
private int maxPCs;
private double threshold;
/**
* Creates a new object for performing PCA that stops at 50 principal components. This may not be optimal for any particular dataset
*
*/
public PCA()
{
this(50);
}
/**
* Performs PCA analysis using the given data set, so that transformations may be performed on future data points. <br>
* <br>
 * NOTE: The maximum number of PCs will be learned until a convergence threshold is met. It is possible that the
* number of PCs computed will be equal to the number of dimensions, meaning no dimensionality reduction has
* occurred, but a transformation of the dimensions into a new space.
*
* @param dataSet the data set to learn from
*/
public PCA(DataSet dataSet)
{
this(dataSet, Integer.MAX_VALUE);
}
/**
* Performs PCA analysis using the given data set, so that transformations may be performed on future data points.
*
* @param dataSet the data set to learn from
* @param maxPCs the maximum number of Principal Components to let the algorithm learn. The algorithm may stop
* earlier if all the variance has been explained, or the convergence threshold has been met.
* Note, the computable maximum number of PCs is limited to the minimum of the number of samples and the
* number of dimensions.
*/
public PCA(DataSet dataSet, int maxPCs)
{
this(dataSet, maxPCs, 1e-4);
}
/**
* Creates a new object for performing PCA
*
* @param maxPCs the maximum number of Principal Components to let the
* algorithm learn. The algorithm may stop earlier if all the variance has
* been explained, or the convergence threshold has been met. Note, the
* computable maximum number of PCs is limited to the minimum of the number
* of samples and the number of dimensions.
*/
public PCA(int maxPCs)
{
this(maxPCs, 1e-4);
}
/**
* Creates a new object for performing PCA
*
* @param maxPCs the maximum number of Principal Components to let the algorithm learn. The algorithm may stop
* earlier if all the variance has been explained, or the convergence threshold has been met.
* Note, the computable maximum number of PCs is limited to the minimum of the number of samples and the
* number of dimensions.
* @param threshold a convergence threshold, any small value will work. Smaller values will
* not produce more accurate results, but may make the algorithm take longer if it would
* have terminated before <tt>maxPCs</tt> was reached.
*/
public PCA(int maxPCs, double threshold)
{
setMaxPCs(maxPCs);
setThreshold(threshold);
}
/**
* Performs PCA analysis using the given data set, so that transformations may be performed on future data points.
*
* @param dataSet the data set to learn from
* @param maxPCs the maximum number of Principal Components to let the algorithm learn. The algorithm may stop
* earlier if all the variance has been explained, or the convergence threshold has been met.
* Note, the computable maximum number of PCs is limited to the minimum of the number of samples and the
* number of dimensions.
* @param threshold a convergence threshold, any small value will work. Smaller values will
* not produce more accurate results, but may make the algorithm take longer if it would
* have terminated before <tt>maxPCs</tt> was reached.
*/
public PCA(DataSet dataSet, int maxPCs, double threshold)
{
this(maxPCs, threshold);
fit(dataSet);
}
@Override
public void fit(DataSet dataSet)
{
//Edward, don't forget. This is: Nonlinear Iterative Partial Least Squares (NIPALS) algorithm
List<Vec> scores = new ArrayList<Vec>();
List<Vec> loadings = new ArrayList<Vec>();
//E(0) = X The E-matrix for the zero-th PC
//Contains the unexplained variance in the data at each step.
Matrix E = dataSet.getDataMatrix();
//This is the MAX number of possible Principal Components
int PCs = Math.min(dataSet.size(), dataSet.getNumNumericalVars());
PCs = Math.min(maxPCs, PCs);
Vec t = getColumn(E);
double tauOld = t.dot(t);
Vec p = new DenseVector(E.cols());
for(int i = 1; i <= PCs; i++)
{
for(int iter = 0; iter < 100; iter++)
{
//1. Project X onto t to find the corresponding loading p
//p = (E[i-1]' * t) / (t'*t)
p.zeroOut();
E.transposeMultiply(1.0, t, p);
p.mutableDivide(tauOld);
//2. Normalise loading vector p to length 1
//p = p * (p'*p)^-0.5
p.mutableMultiply(Math.pow(p.dot(p), -0.5));
//3. Project X onto p to find corresponding score vector t
//t = (E[i-1] p)/(p'*p)
t = E.multiply(p);
t.mutableDivide(p.dot(p));
//4. Check for convergence.
double tauNew = t.dot(t);
if(iter > 0 && Math.abs(tauNew-tauOld) <= threshold*tauNew || iter == 99)//go at least one round
{
scores.add(new DenseVector(t));
loadings.add(new DenseVector(p));
break;
}
tauOld = tauNew;
}
//5. Remove the estimated PC component from E[i-1]
Matrix.OuterProductUpdate(E, t, p, -1.0);
}
P = new DenseMatrix(loadings.size(), loadings.get(0).length());
for(int i = 0; i < loadings.size(); i++)
{
Vec pi = loadings.get(i);
for(int j = 0; j < pi.length(); j++)
P.set(i, j, pi.get(j));
}
}
/**
* Copy constructor
* @param other the transform to copy
*/
private PCA(PCA other)
{
if(other.P != null)
this.P = other.P.clone();
this.maxPCs = other.maxPCs;
this.threshold = other.threshold;
}
/**
* sets the maximum number of principal components to learn
* @param maxPCs the maximum number of principal components to learn
*/
public void setMaxPCs(int maxPCs)
{
if(maxPCs <= 0)
throw new IllegalArgumentException("number of principal components must be a positive number, not " + maxPCs);
this.maxPCs = maxPCs;
}
/**
*
* @return maximum number of principal components to learn
*/
public int getMaxPCs()
{
return maxPCs;
}
/**
*
* @param threshold the threshold for convergence of the algorithm
*/
public void setThreshold(double threshold)
{
if(threshold <= 0 || Double.isInfinite(threshold) || Double.isNaN(threshold))
throw new IllegalArgumentException("threshold must be in the range (0, Inf), not " + threshold);
this.threshold = threshold;
}
public double getThreshold()
{
return threshold;
}
/**
* Returns the first non zero column
* @param x the matrix to get a column from
* @return the first non zero column
*/
private static Vec getColumn(Matrix x)
{
Vec t;
for(int i = 0; i < x.cols(); i++)
{
t = x.getColumn(i);
if(t.dot(t) > 0 )
return t;
}
throw new ArithmeticException("Matrix is essentially zero");
}
@Override
public DataPoint transform(DataPoint dp)
{
DataPoint newDP = new DataPoint(
P.multiply(dp.getNumericalValues()),
Arrays.copyOf(dp.getCategoricalValues(), dp.numCategoricalValues()),
CategoricalData.copyOf(dp.getCategoricalData()));
return newDP;
}
@Override
public DataTransform clone()
{
return new PCA(this);
}
}
// JSAT-master/JSAT/src/jsat/datatransform/PNormNormalization.java
package jsat.datatransform;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
/**
 * The PNormNormalization transformation normalizes a vector x by
 * one of its p-norms, where p is in (0, Infinity)
*
* @author Edward Raff
*/
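/* Worked example (illustrative, not part of the original source): with the default
 * p = 2, the vector x = (3, 4) has ||x||_2 = 5, so it is rescaled to (0.6, 0.8);
 * with p = 1, x = (1, 1, 2) has ||x||_1 = 4 and becomes (0.25, 0.25, 0.5).
 */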
public class PNormNormalization implements InPlaceTransform
{
private static final long serialVersionUID = 2934569881395909607L;
private double p;
/**
* Creates a new object that normalizes based on the 2-norm
*/
public PNormNormalization()
{
this(2.0);
}
/**
 * Creates a new p-norm normalization transform
* @param p the norm to use
*/
public PNormNormalization(double p)
{
if(p <= 0 || Double.isNaN(p))
throw new IllegalArgumentException("p must be greater than zero, not " + p);
this.p = p;
}
@Override
public void fit(DataSet data)
{
//no-op, nothing needs to be done
}
@Override
public DataPoint transform(DataPoint dp)
{
DataPoint dpNew = dp.clone();
mutableTransform(dpNew);
return dpNew;
}
@Override
public void mutableTransform(DataPoint dp)
{
Vec vec = dp.getNumericalValues();
double norm = vec.pNorm(p);
if(norm != 0)
vec.mutableDivide(norm);
}
@Override
public boolean mutatesNominal()
{
return false;
}
@Override
public PNormNormalization clone()
{
return new PNormNormalization(p);
}
}
// JSAT-master/JSAT/src/jsat/datatransform/PolynomialTransform.java
package jsat.datatransform;
import java.util.Arrays;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
/**
* A transform for applying a polynomial transformation on the data set. As the
* dimension of the data set grows, the number of new features created by a
* polynomial transform grows rapidly. It is recommended only for small
* dimension problems using small degrees.
*
* @author Edward Raff
*/
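/* Worked example (illustrative, not part of the original source): for two numeric
 * features (a, b) and degree = 2, the transform emits products of the input features
 * such as a, b, a*b, a^2 and b^2, in the order produced by the increment(...)
 * enumeration below. The number of generated features grows combinatorially with both
 * the input dimension and the degree, which is why small dimensions and small degrees
 * are recommended above.
 */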
public class PolynomialTransform implements DataTransform
{
private static final long serialVersionUID = -5332216444253168283L;
private int degree;
/**
* Creates a new polynomial transform of the given degree
* @param degree the degree of the polynomial
* @throws ArithmeticException if the degree is not greater than 1
*/
public PolynomialTransform(int degree)
{
if(degree < 2)
throw new ArithmeticException("The degree of the polynomial was a nonsense value: " + degree);
this.degree = degree;
}
@Override
public void fit(DataSet data)
{
//no-op, nothing needs to be done
}
@Override
public DataPoint transform(DataPoint dp)
{
Vec x = dp.getNumericalValues();
int[] setTo = new int[x.length()];
//TODO compute final size directly instead of doing a pre loop
int finalSize = 0;
int curCount = increment(setTo, degree, 0);
do
{
finalSize++;
curCount = increment(setTo, degree, curCount);
}
while(setTo[x.length()-1] <= degree);
double[] newVec = new double[finalSize];
Arrays.fill(newVec, 1.0);
int index = 0;
Arrays.fill(setTo, 0);
curCount = increment(setTo, degree, 0);
do
{
for(int i = 0; i < setTo.length; i++)
if(setTo[i] > 0)
newVec[index] *= Math.pow(x.get(i), setTo[i]);
index++;
curCount = increment(setTo, degree, curCount);
}
while(setTo[x.length()-1] <= degree);
return new DataPoint(new DenseVector(newVec), dp.getCategoricalValues(),
dp.getCategoricalData());
}
/**
* Increments the array to contain representation of the next combination of
* values in the polynomial
*
* @param setTo the array of values marking how many multiples of that value
* will be used in construction of the point
* @param max the degree of the polynomial
* @param curCount the current sum of all counts in the array <tt>setTo</tt>
* @return the new value of <tt>curCount</tt>
*/
private int increment(int[] setTo, int max, int curCount)
{
setTo[0]++;
curCount++;
if(curCount <= max)
return curCount;
int carryPos = 0;
while(carryPos < setTo.length-1 && curCount > max)
{
curCount-=setTo[carryPos];
setTo[carryPos] = 0;
setTo[++carryPos]++;
curCount++;
}
return curCount;
}
@Override
public DataTransform clone()
{
return new PolynomialTransform(degree);
}
/**
* Sets the degree of the polynomial to transform the input vector into
*
* @param degree the positive degree to use
*/
public void setDegree(int degree)
{
if (degree < 1)
throw new IllegalArgumentException("Degree must be a positive value, not " + degree);
this.degree = degree;
}
/**
* Returns the polynomial degree to use
*
* @return the polynomial degree to use
*/
public int getDegree()
{
return degree;
}
}
// JSAT-master/JSAT/src/jsat/datatransform/ProjectionTransform.java
/*
* Copyright (C) 2021 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.datatransform;
import java.util.Arrays;
import jsat.DataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.linear.Matrix;
import jsat.linear.Vec;
/**
* This class is used as a base class for simple linear projections of a
* dataset. You must pass in the projection you want to use at construction. It
* should be used only if you need a temporary transform object (will not be
* saved) and know how you want to transform it, or to extend for use by another
* class.
*
*
* @author Edward Raff
*/
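/* Usage sketch (illustrative, not part of the original source; it assumes the array
 * constructors of JSAT's DenseMatrix and DenseVector): project 3-d points down to 2-d
 * with y = P*x + b.
 *
 *   Matrix P = new DenseMatrix(new double[][]{{1, 0, 0}, {0, 1, 0}}); //keep the first two coordinates
 *   Vec b = new DenseVector(new double[]{0.0, -1.0});                 //then shift the second one
 *   ProjectionTransform proj = new ProjectionTransform(P, b);
 *   DataPoint projected = proj.transform(somePoint);                  //somePoint must have 3 numeric values
 */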
public class ProjectionTransform implements DataTransform
{
protected Matrix P;
protected Vec b;
/**
*
* @param P the projection matrix
* @param b an offset to apply after projection (i.e., bias terms)
*/
public ProjectionTransform(Matrix P, Vec b)
{
this.P = P;
this.b = b;
}
public ProjectionTransform(ProjectionTransform toClone)
{
this(toClone.P.clone(), toClone.b.clone());
}
@Override
public DataPoint transform(DataPoint dp)
{
Vec x_new = P.multiply(dp.getNumericalValues());
x_new.mutableAdd(b);
DataPoint newDP = new DataPoint(
x_new,
Arrays.copyOf(dp.getCategoricalValues(), dp.numCategoricalValues()),
CategoricalData.copyOf(dp.getCategoricalData()));
return newDP;
}
@Override
public void fit(DataSet data)
{
//NOP
}
@Override
public ProjectionTransform clone()
{
return new ProjectionTransform(this);
}
}
// JSAT-master/JSAT/src/jsat/datatransform/RemoveAttributeTransform.java
package jsat.datatransform;
import java.util.*;
import jsat.DataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.linear.*;
import jsat.utils.IntList;
import jsat.utils.IntSet;
/**
* This Data Transform allows the complete removal of specific features from the
* data set.
*
* @author Edward Raff
*/
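/* Usage sketch (illustrative, not part of the original source; it assumes JSAT's IntSet
 * collection constructor and DataSet#applyTransform):
 *
 *   Set<Integer> dropCat = new IntSet();                       //keep every categorical feature
 *   Set<Integer> dropNum = new IntSet(Arrays.asList(0, 3));    //drop numeric features 0 and 3
 *   RemoveAttributeTransform rat = new RemoveAttributeTransform(data, dropCat, dropNum);
 *   data.applyTransform(rat);                                  //data now has two fewer numeric columns
 */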
public class RemoveAttributeTransform implements DataTransform
{
private static final long serialVersionUID = 2803223213862922734L;
/*
     * Each index map maps the old indices in the original data set to their
     * new positions. The value in the array is the old index; the index of the
     * value is the index it will have once the attributes are removed.
     * This means each map is in sorted order, and is the size of the resulting
     * feature space.
*/
protected int[] catIndexMap;
protected int[] numIndexMap;
protected CategoricalData[] newCatHeader;
private Set<Integer> categoricalToRemove;
private Set<Integer> numericalToRemove;
/**
* Empty constructor that may be used by extending classes. Transforms that
* extend this will need to call
* {@link #setUp(jsat.DataSet, java.util.Set, java.util.Set) } once the
* attributes to remove have been selected
*/
protected RemoveAttributeTransform()
{
}
/**
* Creates a new transform for removing specified features from a data set.
* Needs to still call {@link #fit(jsat.DataSet) } before ready to be used.
*
* @param categoricalToRemove the set of categorical attributes to remove,
     * in the range of [0, {@link DataSet#getNumCategoricalVars() }).
     * @param numericalToRemove the set of numerical attributes to remove, in
     * the range of [0, {@link DataSet#getNumNumericalVars() }).
*/
public RemoveAttributeTransform(Set<Integer> categoricalToRemove, Set<Integer> numericalToRemove)
{
this.categoricalToRemove = categoricalToRemove;
this.numericalToRemove = numericalToRemove;
}
/**
* Creates a new transform for removing specified features from a data set
* @param dataSet the data set that this transform is meant for
     * @param categoricalToRemove the set of categorical attributes to remove, in the range of [0, {@link DataSet#getNumCategoricalVars() }).
     * @param numericalToRemove the set of numerical attributes to remove, in the range of [0, {@link DataSet#getNumNumericalVars() }).
*/
public RemoveAttributeTransform(DataSet dataSet, Set<Integer> categoricalToRemove, Set<Integer> numericalToRemove)
{
this.categoricalToRemove = categoricalToRemove;
this.numericalToRemove = numericalToRemove;
setUp(dataSet, categoricalToRemove, numericalToRemove);
}
/**
* Returns an unmodifiable list of the original indices of the numeric
* attributes that will be kept when this transform is applied.
* @return the numeric indices that are not removed by this transform
*/
public List<Integer> getKeptNumeric()
{
return IntList.unmodifiableView(numIndexMap, numIndexMap.length);
}
/**
* Returns a mapping from the numeric indices in the transformed space back
* to their original indices
*
* @return a mapping from the transformed numeric space to the original one
*/
public Map<Integer, Integer> getReverseNumericMap()
{
Map<Integer, Integer> map = new HashMap<Integer, Integer>();
for(int newIndex = 0; newIndex < numIndexMap.length; newIndex++)
map.put(newIndex, numIndexMap[newIndex]);
return map;
}
/**
* Returns an unmodifiable list of the original indices of the nominal
* attributes that will be kept when this transform is applied.
* @return the nominal indices that are not removed by this transform
*/
public List<Integer> getKeptNominal()
{
return IntList.unmodifiableView(catIndexMap, catIndexMap.length);
}
/**
* Returns a mapping from the nominal indices in the transformed space back
* to their original indices
*
* @return a mapping from the transformed nominal space to the original one
*/
public Map<Integer, Integer> getReverseNominalMap()
{
Map<Integer, Integer> map = new HashMap<Integer, Integer>();
for(int newIndex = 0; newIndex < catIndexMap.length; newIndex++)
map.put(newIndex, catIndexMap[newIndex]);
return map;
}
@Override
public void fit(DataSet data)
{
if (catIndexMap == null || numIndexMap == null)
setUp(data, categoricalToRemove, numericalToRemove);
}
/**
* Sets up the Remove Attribute Transform properly
*
* @param dataSet the data set to remove the attributes from
* @param categoricalToRemove the categorical attributes to remove
* @param numericalToRemove the numeric attributes to remove
*/
protected final void setUp(DataSet dataSet, Set<Integer> categoricalToRemove, Set<Integer> numericalToRemove)
{
for(int i : categoricalToRemove)
if (i >= dataSet.getNumCategoricalVars())
throw new RuntimeException("The data set does not have a categorical value " + i + " to remove");
for(int i : numericalToRemove)
if (i >= dataSet.getNumNumericalVars())
throw new RuntimeException("The data set does not have a numercal value " + i + " to remove");
catIndexMap = new int[dataSet.getNumCategoricalVars()-categoricalToRemove.size()];
newCatHeader = new CategoricalData[catIndexMap.length];
numIndexMap = new int[dataSet.getNumNumericalVars()-numericalToRemove.size()];
int k = 0;
for(int i = 0; i < dataSet.getNumCategoricalVars(); i++)
{
if(categoricalToRemove.contains(i))
continue;
newCatHeader[k] = dataSet.getCategories()[i].clone();
catIndexMap[k++] = i;
}
k = 0;
for(int i = 0; i < dataSet.getNumNumericalVars(); i++)
{
if(numericalToRemove.contains(i))
continue;
numIndexMap[k++] = i;
}
}
/**
* Copy constructor
* @param other the transform to copy
*/
protected RemoveAttributeTransform(RemoveAttributeTransform other)
{
if(other.categoricalToRemove != null)
this.categoricalToRemove = new IntSet(other.categoricalToRemove);
if(other.numericalToRemove != null)
this.numericalToRemove = new IntSet(other.numericalToRemove);
if(other.catIndexMap != null)
this.catIndexMap = Arrays.copyOf(other.catIndexMap, other.catIndexMap.length);
if(other.numIndexMap != null)
this.numIndexMap = Arrays.copyOf(other.numIndexMap, other.numIndexMap.length);
if(other.newCatHeader != null)
{
this.newCatHeader = new CategoricalData[other.newCatHeader.length];
for(int i = 0; i < this.newCatHeader.length; i++)
this.newCatHeader[i] = other.newCatHeader[i].clone();
}
}
/**
     * A series of Remove Attribute Transforms may be learned and applied
     * sequentially to a single data set. Instead of keeping all the transforms
     * around indefinitely, a sequential series of Remove Attribute Transforms
     * can be consolidated into a single transform object. <br>
     * This method mutates this transform by providing it with the
     * transform that would have been applied before this current object. Once
     * complete, this transform can be used to perform both removals in one
     * step.<br><br>
* Example: <br>
* An initial set of features <i>A</i> is transformed into <i>A'</i> by
* transform t<sub>1</sub><br>
* <i>A'</i> is transformed into <i>A''</i> by transform t<sub>2</sub><br>
* Instead, you can invoke t<sub>2</sub>.consolidate(t<sub>1</sub>).
* You can then transform <i>A</i> into <i>A''</i> by using only transform
* t<sub>2</sub>
*
*
* @param preceding the DataTransform that immediately precedes this one in
* a sequential list of transforms
*/
public void consolidate(RemoveAttributeTransform preceding)
{
for(int i = 0; i < catIndexMap.length; i++)
catIndexMap[i] = preceding.catIndexMap[catIndexMap[i]];
for(int i = 0; i < numIndexMap.length; i++)
numIndexMap[i] = preceding.numIndexMap[numIndexMap[i]];
}
@Override
public DataPoint transform(DataPoint dp)
{
int[] catVals = dp.getCategoricalValues();
Vec numVals = dp.getNumericalValues();
CategoricalData[] newCatData = new CategoricalData[catIndexMap.length];
int[] newCatVals = new int[newCatData.length];
Vec newNumVals;
if (numVals.isSparse())
if (numVals instanceof SparseVector)
newNumVals = new SparseVector(numIndexMap.length, ((SparseVector) numVals).nnz());
else
newNumVals = new SparseVector(numIndexMap.length);
else
newNumVals = new DenseVector(numIndexMap.length);
for(int i = 0; i < catIndexMap.length; i++)
{
newCatVals[i] = catVals[catIndexMap[i]];
newCatData[i] = dp.getCategoricalData()[catIndexMap[i]];
}
int k = 0;
Iterator<IndexValue> iter = numVals.getNonZeroIterator();
if (iter.hasNext())//if all values are zero, nothing to do
{
IndexValue curIV = iter.next();
for (int i = 0; i < numIndexMap.length; i++)//i is the old index
{
if (numVals.isSparse())//log(n) inserts and lookups to avoid!
{
if (curIV == null)
continue;
if (numIndexMap[i] > curIV.getIndex())//We skipped a value that existed
while (numIndexMap[i] > curIV.getIndex() && iter.hasNext())
curIV = iter.next();
if (numIndexMap[i] < curIV.getIndex())//Index is zero, nothing to set
continue;
else if (numIndexMap[i] == curIV.getIndex())
{
newNumVals.set(i, curIV.getValue());
if (iter.hasNext())
curIV = iter.next();
else
curIV = null;
}
}
else//All dense, just set them all
newNumVals.set(i, numVals.get(numIndexMap[i]));
}
}
return new DataPoint(newNumVals, newCatVals, newCatData);
}
@Override
public RemoveAttributeTransform clone()
{
return new RemoveAttributeTransform(this);
}
}
// JSAT-master/JSAT/src/jsat/datatransform/StandardizeTransform.java
package jsat.datatransform;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
/**
* This transform performs standardization of the data, which makes each column
 * have a mean of zero and a variance of one. This assumes the data comes from a
* normal distribution and scales it to the unit normal distribution.
* <br><br>
* This transform is equivalent to applying {@link ZeroMeanTransform} followed
* by {@link UnitVarianceTransform}.
*
* @author Edward Raff
*/
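/* Worked example (illustrative, not part of the original source): if a column has
 * mean 10 and standard deviation 2, a value of 14 becomes (14 - 10) / 2 = 2, i.e.
 * two standard deviations above the mean. The small 1e-10 added in fit() below only
 * guards against division by zero for constant columns.
 */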
public class StandardizeTransform implements InPlaceTransform
{
private static final long serialVersionUID = -2349721113741805955L;
private Vec means;
private Vec stdDevs;
/**
* Creates a new object for Standardizing datasets
*/
public StandardizeTransform()
{
}
/**
     * Creates a new object for standardizing datasets, fit to the given dataset
* @param dataset the dataset to learn how to standardize from
*/
public StandardizeTransform(DataSet dataset)
{
fit(dataset);
}
@Override
public void fit(DataSet dataset)
{
Vec[] vecs = dataset.getColumnMeanVariance();
means = vecs[0];
stdDevs = vecs[1];
for(int i = 0; i < stdDevs.length(); i++)//convert variance to standard deviation
stdDevs.set(i, Math.sqrt(stdDevs.get(i)+1e-10));
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public StandardizeTransform(StandardizeTransform toCopy)
{
this.means = toCopy.means.clone();
this.stdDevs = toCopy.stdDevs.clone();
}
@Override
public DataPoint transform(DataPoint dp)
{
DataPoint newDP = dp.clone();
mutableTransform(newDP);
return newDP;
}
@Override
public void mutableTransform(DataPoint dp)
{
Vec toAlter = dp.getNumericalValues();
toAlter.mutableSubtract(means);
toAlter.mutablePairwiseDivide(stdDevs);
}
@Override
public boolean mutatesNominal()
{
return false;
}
@Override
public StandardizeTransform clone()
{
return new StandardizeTransform(this);
}
}
// JSAT-master/JSAT/src/jsat/datatransform/UnitVarianceTransform.java
package jsat.datatransform;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
/**
* Creates a transform to alter data points so that each attribute has a
* standard deviation of 1, which means a variance of 1.
*
* @author Edward Raff
*/
public class UnitVarianceTransform implements InPlaceTransform
{
private static final long serialVersionUID = 3645532503475641917L;
private Vec stndDevs;
/**
* Creates a new object for transforming datasets
*/
public UnitVarianceTransform()
{
}
/**
* Creates a new object for making datasets unit variance fit to the given
* dataset
*
* @param d the dataset to learn this transform from
*/
public UnitVarianceTransform(DataSet d)
{
fit(d);
}
@Override
public void fit(DataSet d)
{
stndDevs = d.getColumnMeanVariance()[1];
}
/**
* Copy constructor
* @param other the transform to make a copy of
*/
private UnitVarianceTransform(UnitVarianceTransform other)
{
this.stndDevs = other.stndDevs.clone();
}
@Override
public DataPoint transform(DataPoint dp)
{
DataPoint newDp = dp.clone();
mutableTransform(newDp);
return newDp;
}
@Override
public void mutableTransform(DataPoint dp)
{
dp.getNumericalValues().mutablePairwiseDivide(stndDevs);
}
@Override
public boolean mutatesNominal()
{
return false;
}
@Override
public DataTransform clone()
{
return new UnitVarianceTransform(this);
}
}
// JSAT-master/JSAT/src/jsat/datatransform/WhitenedPCA.java
package jsat.datatransform;
import java.util.Comparator;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.distributions.Distribution;
import jsat.distributions.discrete.UniformDiscrete;
import jsat.linear.*;
import static jsat.linear.MatrixStatistics.*;
/**
 * An extension of {@link PCA} that attempts to capture the variance, and make
 * the variables in the output space independent from each other and of equal
 * scale, so that the covariance is equal to {@link Matrix#eye(int) I}. The
 * results may be further from the identity matrix than desired as the target
 * dimension shrinks.<br>
* <br>
 * The Whitened PCA is more computationally expensive than the normal PCA
* algorithm, but transforming the data takes the same time.
*
* @author Edward Raff
*/
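/* In matrix terms (illustrative summary, not part of the original source): if the
 * covariance of the centered data has the eigendecomposition C = U * S * U^T, then
 * setUpTransform(...) below builds
 *
 *   transform = diag(1 / sqrt(s_i + regularization)) * U_k^T
 *
 * where U_k holds the top k = dimensions eigenvectors. Applying it to a point x gives
 * y = transform * x, whose covariance is approximately the identity matrix.
 */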
public class WhitenedPCA extends DataTransformBase
{
private static final long serialVersionUID = 6134243673037330608L;
/**
* Regularization parameter
*/
protected double regularization;
/**
* The number of dimensions to project down to
*/
protected int dimensions;
/**
* The final transformation matrix, that will create new points
* <tt>y</tt> = <tt>transform</tt> * x
*/
protected Matrix transform;
/**
* Creates a new WhitenedPCA transform that uses up to 50 dimensions for the
* transformed space. This may not be optimal for any given dataset.
*/
public WhitenedPCA()
{
this(50);
}
/**
* Creates a new WhitenedPCA transform
*
* @param dims the number of dimensions to project down to
*/
public WhitenedPCA(int dims)
{
this(1e-4, dims);
}
/**
* Creates a new WhitenedPCA transform
*
* @param regularization the amount of regularization to add, avoids
* numerical instability
* @param dims the number of dimensions to project down to
*/
public WhitenedPCA(double regularization, int dims)
{
setRegularization(regularization);
setDimensions(dims);
}
/**
* Creates a new WhitenedPCA from the given dataset
* @param dataSet the data set to whiten
* @param regularization the amount of regularization to add, avoids numerical instability
* @param dims the number of dimensions to project down to
*/
public WhitenedPCA(DataSet dataSet, double regularization, int dims)
{
this(regularization, dims);
fit(dataSet);
}
@Override
public void fit(DataSet dataSet)
{
setUpTransform(getSVD(dataSet));
}
/**
* Creates a new WhitenedPCA, the dimensions will be chosen so that the
* subset of dimensions is of full rank.
*
* @param dataSet the data set to whiten
* @param regularization the amount of regularization to add, avoids numerical instability
*/
public WhitenedPCA(DataSet dataSet, double regularization)
{
setRegularization(regularization);
SingularValueDecomposition svd = getSVD(dataSet);
setDimensions(svd.getRank());
setUpTransform(svd);
}
/**
* Creates a new WhitenedPCA. The dimensions will be chosen so that the
* subset of dimensions is of full rank. The regularization parameter will be
* chosen as the log of the condition of the covariance.
*
* @param dataSet the data set to whiten
*/
public WhitenedPCA(DataSet dataSet)
{
SingularValueDecomposition svd = getSVD(dataSet);
setRegularization(svd);
setDimensions(svd.getRank());
setUpTransform(svd);
}
/**
* Creates a new WhitenedPCA. The regularization parameter will be
* chosen as the log of the condition of the covariance.
*
* @param dataSet the data set to whiten
* @param dims the number of dimensions to project down to
*/
public WhitenedPCA(DataSet dataSet, int dims)
{
SingularValueDecomposition svd = getSVD(dataSet);
setRegularization(svd);
setDimensions(dims);
setUpTransform(svd);
}
/**
* Copy constructor
* @param other the transform to make a copy of
*/
private WhitenedPCA(WhitenedPCA other)
{
this.regularization = other.regularization;
this.dimensions = other.dimensions;
this.transform = other.transform.clone();
}
/**
* Gets a SVD for the covariance matrix of the data set
* @param dataSet the data set in question
* @return the SVD for the covariance
*/
private SingularValueDecomposition getSVD(DataSet dataSet)
{
Matrix cov = covarianceMatrix(meanVector(dataSet), dataSet);
for(int i = 0; i < cov.rows(); i++)//force it to be symmetric
for(int j = 0; j < i; j++)
cov.set(j, i, cov.get(i, j));
EigenValueDecomposition evd = new EigenValueDecomposition(cov);
//Sort from largest to smallest
evd.sortByEigenValue(new Comparator<Double>()
{
@Override
public int compare(Double o1, Double o2)
{
return -Double.compare(o1, o2);
}
});
return new SingularValueDecomposition(evd.getVRaw(), evd.getVRaw(), evd.getRealEigenvalues());
}
/**
* Creates the {@link #transform transform matrix} to be used when
* converting data points. It is called in the constructor after all values
* are set.
*
* @param svd the SVD of the covariance of the source data set
*/
protected void setUpTransform(SingularValueDecomposition svd)
{
Vec diag = new DenseVector(dimensions);
double[] s = svd.getSingularValues();
for(int i = 0; i < dimensions; i++)
diag.set(i, 1.0/Math.sqrt(s[i]+regularization));
transform = new SubMatrix(svd.getU().transpose(), 0, 0, dimensions, s.length).clone();
Matrix.diagMult(diag, transform);
}
@Override
public DataPoint transform(DataPoint dp)
{
Vec newVec = transform.multiply(dp.getNumericalValues());
DataPoint newDp = new DataPoint(newVec, dp.getCategoricalValues(), dp.getCategoricalData());
return newDp;
}
/**
*
* @param regularization the regularization to apply to the diagonal of the
     * decomposition. This can improve numeric stability and reduce noise.
*/
public void setRegularization(double regularization)
{
if(regularization < 0 || Double.isNaN(regularization) || Double.isInfinite(regularization))
throw new ArithmeticException("Regularization must be non negative value, not " + regularization);
this.regularization = regularization;
}
/**
*
* @return the amount of regularization to apply
*/
public double getRegularization()
{
return regularization;
}
@Override
public DataTransform clone()
{
return new WhitenedPCA(this);
}
private void setRegularization(SingularValueDecomposition svd)
{
if(svd.isFullRank())
setRegularization(1e-10);
else
setRegularization(Math.max(Math.log(1.0+svd.getSingularValues()[svd.getRank()])*0.25, 1e-4));
}
/**
* Sets the number of dimensions to project down to
*
* @param dimensions the feature size to project down to
*/
public void setDimensions(int dimensions)
{
if (dimensions < 1)
throw new IllegalArgumentException("Number of dimensions must be positive, not " + dimensions);
this.dimensions = dimensions;
}
/**
* Returns the number of dimensions to project down to
*
* @return the number of dimensions to project down to
*/
public int getDimensions()
{
return dimensions;
}
public static Distribution guessDimensions(DataSet d)
{
//TODO improve using SVD rank
if(d.getNumNumericalVars() < 100)
return new UniformDiscrete(1, d.getNumNumericalVars());
return new UniformDiscrete(20, 100);
}
}
// JSAT-master/JSAT/src/jsat/datatransform/WhitenedZCA.java
package jsat.datatransform;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.*;
/**
 * An extension of {@link WhitenedPCA}: the Whitened Zero Component Analysis.
 * Whitened ZCA cannot project to a lower dimension, as it rotates the output
 * within the original dimension.
*
* @author Edward Raff
*/
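/* In matrix terms (illustrative summary, not part of the original source): where
 * WhitenedPCA uses diag(1 / sqrt(s_i + reg)) * U^T, the setUpTransform(...) override
 * below builds the symmetric matrix
 *
 *   transform = U * diag(1 / sqrt(s_i + reg)) * U^T
 *
 * which whitens the data while keeping it in the original feature space, so the output
 * dimension always equals the input dimension.
 */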
public class WhitenedZCA extends WhitenedPCA implements InPlaceTransform
{
private static final long serialVersionUID = 7546033727733619587L;
private ThreadLocal<Vec> tempVecs;
/**
* Creates a new WhitenedZCA transform that uses up to 50 dimensions for the
* transformed space. This may not be optimal for any given dataset.
*/
public WhitenedZCA()
{
this(50);
}
/**
* Creates a new WhitenedZCA transform
*
* @param dims the number of dimensions to project down to
*/
public WhitenedZCA(int dims)
{
this(1e-4, dims);
}
/**
* Creates a new WhitenedZCA transform
*
* @param regularization the amount of regularization to add, avoids
* numerical instability
* @param dims the number of dimensions to project down to
*/
public WhitenedZCA(double regularization, int dims)
{
setRegularization(regularization);
setDimensions(dims);
}
/**
* Creates a new Whitened ZCA transform from the given data.
*
* @param dataSet the data set to whiten
* @param regularization the amount of regularization to add, avoids
* numerical instability
*/
public WhitenedZCA(DataSet dataSet, double regularization)
{
super(dataSet, regularization);
}
/**
* Creates a new Whitened ZCA transform from the given data. The
* regularization parameter will be chosen as the log of the condition of
* the covariance.
*
* @param dataSet the data set to whiten
*/
public WhitenedZCA(DataSet dataSet)
{
super(dataSet);
}
@Override
public void fit(DataSet dataSet)
{
super.fit(dataSet);
tempVecs = getThreadLocal(dataSet.getNumNumericalVars());
}
@Override
public void mutableTransform(DataPoint dp)
{
Vec target = tempVecs.get();
target.zeroOut();
transform.multiply(dp.getNumericalValues(), 1.0, target);
target.copyTo(dp.getNumericalValues());
}
@Override
public boolean mutatesNominal()
{
return false;
}
@Override
protected void setUpTransform(SingularValueDecomposition svd)
{
double[] s = svd.getSingularValues();
Vec diag = new DenseVector(s.length);
for(int i = 0; i < s.length; i++)
diag.set(i, 1.0/Math.sqrt(s[i]+regularization));
Matrix U = svd.getU();
transform = U.multiply(Matrix.diag(diag)).multiply(U.transpose());
}
private ThreadLocal<Vec> getThreadLocal(final int dim)
{
return new ThreadLocal<Vec>()
{
@Override
protected Vec initialValue()
{
return new DenseVector(dim);
}
};
}
}
// JSAT-master/JSAT/src/jsat/datatransform/ZeroMeanTransform.java
package jsat.datatransform;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
/**
* A transformation to shift all numeric variables so that their mean is zero
*
* @author Edward Raff
*/
public class ZeroMeanTransform implements InPlaceInvertibleTransform
{
private static final long serialVersionUID = -7411115746918116163L;
/**
* Shift vector stores the mean value of each variable in the original data set.
*/
private Vec shiftVector;
/**
* Creates a new object for transforming datapoints by centering the data
*/
public ZeroMeanTransform()
{
}
/**
* Creates a new object for transforming datapoints by centering the data
* @param dataset the data to learn this transform from
*/
public ZeroMeanTransform(DataSet dataset)
{
fit(dataset);
}
@Override
public void fit(DataSet dataset)
{
shiftVector = dataset.getColumnMeanVariance()[0];
}
/**
* Copy constructor
* @param other the transform to make a copy of
*/
private ZeroMeanTransform(ZeroMeanTransform other)
{
this.shiftVector = other.shiftVector.clone();
}
@Override
public DataPoint transform(DataPoint dp)
{
DataPoint newDP = dp.clone();
mutableTransform(newDP);
return newDP;
}
@Override
public void mutableInverse(DataPoint dp)
{
dp.getNumericalValues().mutableAdd(shiftVector);
}
@Override
public DataPoint inverse(DataPoint dp)
{
DataPoint newDP = dp.clone();
mutableInverse(newDP);//invert the copy, not the original point
return newDP;
}
@Override
public void mutableTransform(DataPoint dp)
{
dp.getNumericalValues().mutableSubtract(shiftVector);
}
@Override
public boolean mutatesNominal()
{
return false;
}
@Override
public ZeroMeanTransform clone()
{
return new ZeroMeanTransform(this);
}
}
// JSAT-master/JSAT/src/jsat/datatransform/featureselection/BDS.java
package jsat.datatransform.featureselection;
import java.util.*;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.DataPoint;
import jsat.datatransform.DataTransform;
import jsat.datatransform.DataTransformBase;
import jsat.datatransform.RemoveAttributeTransform;
import jsat.regression.RegressionDataSet;
import jsat.regression.Regressor;
import jsat.utils.IntSet;
import jsat.utils.ListUtils;
import jsat.utils.random.RandomUtil;
/**
* Bidirectional Search (BDS) is a greedy method of selecting a subset
* of features to use for prediction. It performs both {@link SFS} and
* {@link SBS} search at the same time. At each step, a feature is greedily
* added to one set, and then a feature greedily removed from another set.
* Once a feature is added / removed in one set, it is unavailable for selection
* in the other. This can be used to select up to half of the original features.
*
* @author Edward Raff
*/
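/* Usage sketch (illustrative, not part of the original source; it assumes JSAT's
 * DataSet#applyTransform and some Classifier implementation to act as the evaluator):
 *
 *   ClassificationDataSet train = ...;
 *   Classifier evaluator = ...;                    //any classifier used to score candidate subsets
 *   BDS bds = new BDS(10, evaluator, 5);           //select up to 10 features with 5-fold CV
 *   bds.fit(train);                                //runs the bidirectional search
 *   train.applyTransform(bds);                     //keep only the selected features
 */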
public class BDS implements DataTransform
{
private static final long serialVersionUID = 8633823674617843754L;
private RemoveAttributeTransform finalTransform;
private Set<Integer> catSelected;
private Set<Integer> numSelected;
private int featureCount;
private int folds;
private Object evaluator;
/**
* Copy constructor
*
     * @param toClone the BDS transform to copy
*/
public BDS(BDS toClone)
{
this.featureCount = toClone.featureCount;
this.folds = toClone.folds;
this.evaluator = toClone.evaluator;
if(toClone.finalTransform != null)
{
this.finalTransform = toClone.finalTransform.clone();
this.catSelected = new IntSet(toClone.catSelected);
this.numSelected = new IntSet(toClone.numSelected);
}
}
/**
* Creates a BDS feature selection for a classification problem
*
* @param featureCount the number of features to select
* @param evaluator the classifier to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
*/
public BDS(int featureCount, Classifier evaluator, int folds)
{
setFeatureCount(featureCount);
setFolds(folds);
setEvaluator(evaluator);
}
/**
* Performs BDS feature selection for a classification problem
*
* @param featureCount the number of features to select
* @param dataSet the data set to perform feature selection on
* @param evaluator the classifier to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
*/
public BDS(int featureCount, ClassificationDataSet dataSet, Classifier evaluator, int folds)
{
search(dataSet, featureCount, folds, evaluator);
}
/**
* Creates a BDS feature selection for a regression problem
*
* @param featureCount the number of features to select
* @param evaluator the regressor to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
*/
public BDS(int featureCount, Regressor evaluator, int folds)
{
setFeatureCount(featureCount);
setFolds(folds);
setEvaluator(evaluator);
}
/**
* Performs BDS feature selection for a regression problem
*
* @param featureCount the number of features to select
* @param dataSet the data set to perform feature selection on
* @param evaluator the regressor to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
*/
public BDS(int featureCount, RegressionDataSet dataSet, Regressor evaluator, int folds)
{
this(featureCount, evaluator, folds);
search(dataSet, featureCount, folds, evaluator);
}
@Override
public DataPoint transform(DataPoint dp)
{
return finalTransform.transform(dp);
}
@Override
public BDS clone()
{
return new BDS(this);
}
/**
* Returns a copy of the set of categorical features selected by the search
* algorithm
*
* @return the set of categorical features to use
*/
public Set<Integer> getSelectedCategorical()
{
return new IntSet(catSelected);
}
/**
* Returns a copy of the set of numerical features selected by the search
* algorithm.
*
* @return the set of numeric features to use
*/
public Set<Integer> getSelectedNumerical()
{
return new IntSet(numSelected);
}
@Override
public void fit(DataSet data)
{
search(data, featureCount, folds, evaluator);
}
private void search(DataSet dataSet, int maxFeatures, int folds, Object evaluator)
{
Random rand = RandomUtil.getRandom();
int nF = dataSet.getNumFeatures();
int nCat = dataSet.getNumCategoricalVars();
//True selected, also used for SFS
catSelected = new IntSet(dataSet.getNumCategoricalVars());
numSelected = new IntSet(dataSet.getNumNumericalVars());
//Structs for SFS side
Set<Integer> availableSFS = new IntSet();
ListUtils.addRange(availableSFS, 0, nF, 1);
Set<Integer> catToRemoveSFS = new IntSet(dataSet.getNumCategoricalVars());
Set<Integer> numToRemoveSFS = new IntSet(dataSet.getNumNumericalVars());
ListUtils.addRange(catToRemoveSFS, 0, nCat, 1);
ListUtils.addRange(numToRemoveSFS, 0, nF-nCat, 1);
//Structs for SBS side
Set<Integer> availableSBS = new IntSet();
ListUtils.addRange(availableSBS, 0, nF, 1);
Set<Integer> catSelecteedSBS = new IntSet(dataSet.getNumCategoricalVars());
Set<Integer> numSelectedSBS = new IntSet(dataSet.getNumNumericalVars());
Set<Integer> catToRemoveSBS = new IntSet(dataSet.getNumCategoricalVars());
Set<Integer> numToRemoveSBS = new IntSet(dataSet.getNumNumericalVars());
//Start with all selected, and prune them out
ListUtils.addRange(catSelecteedSBS, 0, nCat, 1);
ListUtils.addRange(numSelectedSBS, 0, nF-nCat, 1);
double[] pBestScore0 = new double[]{Double.POSITIVE_INFINITY};
double[] pBestScore1 = new double[]{Double.POSITIVE_INFINITY};
int max = Math.min(maxFeatures, nF/2);
for(int i = 0; i < max; i++)
{
//Find and keep one good one
int mustKeep = SFS.SFSSelectFeature(availableSFS, dataSet,
catToRemoveSFS, numToRemoveSFS, catSelected,
numSelected, evaluator, folds, rand, pBestScore0, max);
availableSBS.remove(mustKeep);
SFS.removeFeature(mustKeep, nCat, catToRemoveSBS, numToRemoveSBS);
//Find and remove one bad one
int mustRemove = SBS.SBSRemoveFeature(availableSBS, dataSet,
catToRemoveSBS, numToRemoveSBS, catSelecteedSBS,
numSelectedSBS, evaluator, folds, rand, max,
pBestScore1, 0.0);
availableSFS.remove(mustRemove);
SFS.addFeature(mustRemove, nCat, catToRemoveSFS, numToRemoveSFS);
}
catSelecteedSBS.clear();
numToRemoveSBS.clear();
ListUtils.addRange(catSelecteedSBS, 0, nCat, 1);
ListUtils.addRange(numSelectedSBS, 0, nF-nCat, 1);
catSelecteedSBS.removeAll(catSelected);
numSelectedSBS.removeAll(numSelected);
finalTransform = new RemoveAttributeTransform(dataSet, catSelecteedSBS, numSelectedSBS);
}
/**
* Sets the number of features to select for use from the set of all input
* features
*
* @param featureCount the number of features to use
*/
public void setFeatureCount(int featureCount)
{
if (featureCount < 1)
throw new IllegalArgumentException("Number of features to select must be positive, not " + featureCount);
this.featureCount = featureCount;
}
/**
* Returns the number of features to use
*
* @return the number of features to use
*/
public int getFeatureCount()
{
return featureCount;
}
/**
* Sets the number of folds to use for cross validation when estimating the error rate
* @param folds the number of folds to use for cross validation when estimating the error rate
*/
public void setFolds(int folds)
{
if(folds <= 0 )
throw new IllegalArgumentException("Number of CV folds must be positive, not " + folds);
this.folds = folds;
}
/**
*
* @return the number of folds to use for cross validation when estimating the error rate
*/
public int getFolds()
{
return folds;
}
private void setEvaluator(Object evaluator)
{
this.evaluator = evaluator;
}
}
// JSAT-master/JSAT/src/jsat/datatransform/featureselection/LRS.java
package jsat.datatransform.featureselection;
import java.util.*;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.DataPoint;
import jsat.datatransform.*;
import jsat.regression.RegressionDataSet;
import jsat.regression.Regressor;
import jsat.utils.IntSet;
import jsat.utils.ListUtils;
import jsat.utils.random.RandomUtil;
/**
* plus-L minus-R Selection (LRS) is a greedy method of selecting a subset
* of features to use for prediction. Its behavior is dependent upon whether L
 * or R is the larger value. No matter what, L features will be greedily added to
* the set to decrease the error rate, and R features will be greedily removed
* while trying to maintain the error rate. <br>
* If L > R, then L-R features will be selected, the L step running first
* followed by R performing pruning on the found set. <br>
* If L < R, then D-R+L features will be selected, where D is the original
* number of features. First R features will be removed, and then L of the
* removed features will be added back to the final set. <br>
* L = R is not allowed.
*
* @author Edward Raff
*/
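/* Worked example (illustrative, not part of the original source): with D = 20 original
 * features, L = 15 and R = 10 selects L - R = 5 features (add first, then prune), while
 * L = 5 and R = 10 selects D - R + L = 15 features (prune first, then add back).
 */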
public class LRS implements DataTransform
{
private static final long serialVersionUID = 3065300352046535656L;
private RemoveAttributeTransform finalTransform;
private Set<Integer> catSelected;
private Set<Integer> numSelected;
private int L;
private int R;
private Object evaluater;
private int folds;
/**
* Copy constructor
* @param toClone the version to copy
*/
private LRS(LRS toClone)
{
this.L = toClone.L;
this.R = toClone.R;
this.folds = toClone.folds;
this.evaluater = toClone.evaluater;
if(toClone.catSelected != null)
{
this.finalTransform = toClone.finalTransform.clone();
this.catSelected = new IntSet(toClone.catSelected);
this.numSelected = new IntSet(toClone.numSelected);
}
}
/**
* Creates a LRS feature selection object for a classification problem
*
* @param L the number of features to greedily add
* @param R the number of features to greedily remove
* @param evaluater the classifier to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
*/
public LRS(int L, int R, Classifier evaluater, int folds)
{
setFeaturesToAdd(L);
setFeaturesToRemove(R);
setFolds(folds);
setEvaluator(evaluater);
}
/**
* Performs LRS feature selection for a classification problem
*
* @param L the number of features to greedily add
* @param R the number of features to greedily remove
* @param cds the data set to perform feature selection on
* @param evaluater the classifier to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
*/
public LRS(int L, int R, ClassificationDataSet cds, Classifier evaluater, int folds)
{
search(cds, L, R, evaluater, folds);
}
/**
* Creates a LRS feature selection object for a regression problem
*
* @param L the number of features to greedily add
* @param R the number of features to greedily remove
* @param evaluater the regressor to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
*/
public LRS(int L, int R, Regressor evaluater, int folds)
{
setFeaturesToAdd(L);
setFeaturesToRemove(R);
setFolds(folds);
setEvaluator(evaluater);
}
/**
* Performs LRS feature selection for a regression problem
*
* @param L the number of features to greedily add
* @param R the number of features to greedily remove
* @param rds the data set to perform feature selection on
* @param evaluater the regressor to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
*/
public LRS(int L, int R, RegressionDataSet rds, Regressor evaluater, int folds)
{
this(L, R, evaluater, folds);
search(rds, L, R, evaluater, folds);
}
@Override
public DataPoint transform(DataPoint dp)
{
return finalTransform.transform(dp);
}
@Override
public LRS clone()
{
return new LRS(this);
}
/**
* Returns a copy of the set of categorical features selected by the search
* algorithm
*
* @return the set of categorical features to use
*/
public Set<Integer> getSelectedCategorical()
{
return new IntSet(catSelected);
}
/**
* Returns a copy of the set of numerical features selected by the search
* algorithm.
*
* @return the set of numeric features to use
*/
public Set<Integer> getSelectedNumerical()
{
return new IntSet(numSelected);
}
@Override
public void fit(DataSet data)
{
search(data, L, R, evaluater, folds);
}
private void search(DataSet cds, int L, int R, Object evaluater, int folds)
{
int nF = cds.getNumFeatures();
int nCat = cds.getNumCategoricalVars();
catSelected = new IntSet(nCat);
numSelected = new IntSet(nF-nCat);
Set<Integer> catToRemove = new IntSet(nCat);
Set<Integer> numToRemove = new IntSet(nF-nCat);
Set<Integer> available = new IntSet(nF);
ListUtils.addRange(available, 0, nF, 1);
Random rand = RandomUtil.getRandom();
double[] pBestScore = new double[]{Double.POSITIVE_INFINITY};
if (L > R)
{
ListUtils.addRange(catToRemove, 0, nCat, 1);
ListUtils.addRange(numToRemove, 0, nF-nCat, 1);
//Select L features
for(int i = 0; i < L; i++)
SFS.SFSSelectFeature(available, cds, catToRemove, numToRemove,
catSelected, numSelected, evaluater, folds,
rand, pBestScore, L);
//We now restrict ourselves to the L features
available.clear();
available.addAll(catSelected);
for(int i : numSelected)
available.add(i+nCat);
//Now remove R features from the L selected
for(int i = 0; i < R; i++)
SBS.SBSRemoveFeature(available, cds, catToRemove, numToRemove,
catSelected, numSelected, evaluater, folds, rand,
L-R, pBestScore, 0.0);
}
else if(L < R)
{
ListUtils.addRange(catSelected, 0, nCat, 1);
ListUtils.addRange(numSelected, 0, nF-nCat, 1);
//Remove R features
for(int i = 0; i < R; i++)
SBS.SBSRemoveFeature(available, cds, catToRemove, numToRemove,
catSelected, numSelected, evaluater, folds, rand,
nF-R, pBestScore, 0.0);
//Now we restrict ourselves to adding back the features that were removed
available.clear();
available.addAll(catToRemove);
for(int i : numToRemove)
available.add(i+nCat);
//Now add L features back
for(int i = 0; i < L; i++)
SFS.SFSSelectFeature(available, cds, catToRemove, numToRemove,
catSelected, numSelected, evaluater, folds,
rand, pBestScore, R-L);
}
finalTransform = new RemoveAttributeTransform(cds, catToRemove, numToRemove);
}
/**
* Sets the number of features to add (the L parameter).
*
* @param featuresToAdd the number of features to greedily add
*/
public void setFeaturesToAdd(int featuresToAdd)
{
if (featuresToAdd < 1)
throw new IllegalArgumentException("Number of features to add must be positive, not " + featuresToAdd);
this.L = featuresToAdd;
}
/**
* Returns the number of features to add
*
* @return the number of features to add
*/
public int getFeaturesToAdd()
{
return L;
}
/**
* Sets the number of features to remove (the R parameter).
*
* @param featuresToRemove the number of features to greedily remove
*/
public void setFeaturesToRemove(int featuresToRemove)
{
if (featuresToRemove < 1)
throw new IllegalArgumentException("Number of features to remove must be positive, not " + featuresToRemove);
this.R = featuresToRemove;
}
/**
* Returns the number of features to remove
*
* @return the number of features to remove
*/
public int getFeaturesToRemove()
{
return R;
}
/**
* Sets the number of folds to use for cross validation when estimating the error rate
* @param folds the number of folds to use for cross validation when estimating the error rate
*/
public void setFolds(int folds)
{
if(folds <= 0 )
throw new IllegalArgumentException("Number of CV folds must be positive, not " + folds);
this.folds = folds;
}
/**
*
* @return the number of folds to use for cross validation when estimating the error rate
*/
public int getFolds()
{
return folds;
}
private void setEvaluator(Object evaluator)
{
this.evaluater = evaluator;
}
}
// JSAT-master/JSAT/src/jsat/datatransform/featureselection/MutualInfoFS.java
package jsat.datatransform.featureselection;
import static java.lang.Math.log;
import java.util.Set;
import jsat.DataSet;
import jsat.classifiers.*;
import jsat.datatransform.*;
import jsat.exceptions.FailedToFitException;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
import jsat.utils.IndexTable;
import jsat.utils.IntSet;
/**
* Performs greedy feature selection based on Mutual Information of the features
* with respect to the class values. This is an attempt to select features that
* are discriminative for classification tasks. <br>
* The method of performing Mutual Information on numeric attributes is
* controlled by {@link NumericalHandeling}
*
* @author Edward Raff
*/
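/* Reference formula (illustrative, not part of the original source): for a discrete
 * feature X and class label Y, the mutual information that fit(...) below estimates is
 *
 *   I(X; Y) = sum over x, y of p(x, y) * log( p(x, y) / ( p(x) * p(y) ) )
 *
 * where p(x, y) comes from the (weighted) joint counts and p(x), p(y) from the feature
 * and class priors; features with larger estimated I(X; Y) are preferred for selection.
 */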
public class MutualInfoFS extends RemoveAttributeTransform
{
private static final long serialVersionUID = -4394620220403363542L;
private int featureCount;
private NumericalHandeling numericHandling;
/**
* The definition for mutual information for continuous attributes requires
* an integration of an unknown function, as such requires some form of
* approximation. This controls how the approximation is done
*/
public enum NumericalHandeling
{
/**
* Mutual information for numeric attributes is not computed, so no
* numeric attributes will be removed - and are ignored completely. The
* number of features to select does not include the numeric attributes
* in this case.
*/
NONE,
/**
* Numeric attributes are treated as nominal features with binary
* values. The false value is if the value is zero, and the true value
* is any non zero value.
*/
BINARY,
}
/**
* Creates a new Mutual Information feature selection object that attempts
* to select up to 100 features. Numeric attributes are handled by
* {@link NumericalHandeling#BINARY}
*
*/
public MutualInfoFS()
{
this(100);
}
/**
* Creates a new Mutual Information feature selection object. Numeric
* attributes are handled by {@link NumericalHandeling#BINARY}
*
* @param featureCount the number of features to select
*/
public MutualInfoFS(int featureCount)
{
this(featureCount, NumericalHandeling.BINARY);
}
/**
* Creates a new Mutual Information feature selection object. Numeric
* attributes are handled by {@link NumericalHandeling#BINARY}
*
* @param dataSet the classification data set to perform feature selection
* from
* @param featureCount the number of features to select
*/
public MutualInfoFS(ClassificationDataSet dataSet, int featureCount)
{
this(dataSet, featureCount, NumericalHandeling.BINARY);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
protected MutualInfoFS(MutualInfoFS toCopy)
{
super(toCopy);
this.featureCount = toCopy.featureCount;
this.numericHandling = toCopy.numericHandling;
}
/**
* Creates a new Mutual Information feature selection object.
*
* @param featureCount the number of features to select
* @param numericHandling the way to handle the computation of mutual
* information for numeric attributes
*/
public MutualInfoFS(int featureCount, NumericalHandeling numericHandling)
{
super();
setFeatureCount(featureCount);
setHandling(numericHandling);
}
/**
* Creates a new Mutual Information feature selection object.
*
* @param dataSet the classification data set to perform feature selection
* from
* @param featureCount the number of features to select
* @param numericHandling the way to handle the computation of mutual
* information for numeric attributes
*/
public MutualInfoFS(ClassificationDataSet dataSet, int featureCount, NumericalHandeling numericHandling)
{
        this(featureCount, numericHandling);
        fit(dataSet);
    }
@Override
public void fit(DataSet data)
{
if(!(data instanceof ClassificationDataSet))
throw new FailedToFitException("MutualInfoFS only works for classification data sets, not " + data.getClass().getSimpleName());
ClassificationDataSet dataSet = (ClassificationDataSet) data;
final int N = dataSet.size();
double[] classPriors = dataSet.getPriors();
double[] logClassPriors = new double[classPriors.length];
for(int i = 0; i < logClassPriors.length; i++)
logClassPriors[i] = log(classPriors[i]);
int numCatVars;
int consideredCount = numCatVars = dataSet.getNumCategoricalVars();
if(numericHandling != NumericalHandeling.NONE)
consideredCount = dataSet.getNumFeatures();
/**
* 1st index is the feature
* 2nd index is the option #
*/
double[][] featPriors = new double[consideredCount][];
CategoricalData[] catInfo = dataSet.getCategories();
/**
* 1st index is the feature
* 2nd index is the option #
* 3rd index is the class
*/
double[][][] jointProb = new double[consideredCount][][];
for(int i = 0; i < jointProb.length; i++)
{
if(i < dataSet.getNumCategoricalVars())//Cat value
{
int options = catInfo[i].getNumOfCategories();
jointProb[i] = new double[options][logClassPriors.length];
featPriors[i] = new double[options];
}
else//Numeric value
{
//Yes/No, but only keep track of the yes values
jointProb[i] = new double[2][logClassPriors.length];
featPriors[i] = new double[1];//feature for numeric is just 1.0-other
}
}
double weightSum = 0.0;
for(int i = 0; i < dataSet.size(); i++)
{
DataPoint dp = dataSet.getDataPoint(i);
int trueClass = dataSet.getDataPointCategory(i);
double weight = dataSet.getWeight(i);
weightSum += weight;
int[] catVals = dp.getCategoricalValues();
for(int j = 0; j < catVals.length; j++)
{
featPriors[j][catVals[j]] += weight;
jointProb[j][catVals[j]][trueClass] += weight;
}
if(numericHandling == NumericalHandeling.BINARY)
{
Vec numeric = dp.getNumericalValues();
for(IndexValue iv : numeric)
{
featPriors[iv.getIndex()+numCatVars][0] += weight;
jointProb[iv.getIndex()+numCatVars][0][trueClass] += weight;
}
}
}
/**
* Mutual Information for each index
*/
double[] mis = new double[consideredCount];
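        //I(X_i; Y) = sum over feature values v and classes c of
        //  p(v, c) * (log p(v, c) - log p(v) - log p(c)),
        //estimated from the (weighted) counts accumulated above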
for(int i = 0; i < consideredCount; i++)
{
double mi = 0.0;
if( i < dataSet.getNumCategoricalVars())//Cat attribute
{
for(int tVal = 0; tVal < jointProb[i].length; tVal++)
{
double featPrior = featPriors[i][tVal]/weightSum;
if(featPrior == 0.0)
continue;
double logFeatPrior = log(featPrior);
for (int tClass = 0; tClass < logClassPriors.length; tClass++)
{
double jp = jointProb[i][tVal][tClass] / weightSum;
if (jp == 0)
continue;
mi += jp * (log(jp) - logFeatPrior - logClassPriors[tClass]);
}
}
}
else//Numeric attribute & it is binary
{
for(int tClass = 0; tClass < classPriors.length; tClass++)
{
double jpNeg = jointProb[i][0][tClass]/weightSum;
double jpPos = (classPriors[tClass]*N - jointProb[i][0][tClass])/weightSum;
double posPrio = featPriors[i][0]/weightSum;
double negPrio = 1.0-posPrio;
if (jpNeg != 0 && negPrio != 0)
mi += jpNeg * (log(jpNeg) - log(negPrio) - logClassPriors[tClass]);
if (jpPos != 0 && posPrio != 0)
mi += jpPos * (log(jpPos) - log(posPrio) - logClassPriors[tClass]);
}
}
mis[i] = mi;
}
IndexTable sortedOrder = new IndexTable(mis);
Set<Integer> catToRemove = new IntSet();
Set<Integer> numToRemove = new IntSet();
for(int i = 0; i < consideredCount-featureCount; i++)
{
int removingIndex = sortedOrder.index(i);
if(removingIndex < numCatVars)
catToRemove.add(removingIndex);
else
numToRemove.add(removingIndex-numCatVars);
}
setUp(dataSet, catToRemove, numToRemove);
}
@Override
public MutualInfoFS clone()
{
return new MutualInfoFS(this);
}
/**
* Sets the number of features to select
*
* @param featureCount the number of features to select
*/
public void setFeatureCount(int featureCount)
{
if (featureCount < 1)
throw new IllegalArgumentException("Number of features must be positive, not " + featureCount);
this.featureCount = featureCount;
}
/**
* Returns the number of features to select
*
* @return the number of features to select
*/
public int getFeatureCount()
{
return featureCount;
}
/**
     * Sets the method used for handling numeric features
     *
     * @param handling the method of handling numeric features
*/
public void setHandling(NumericalHandeling handling)
{
this.numericHandling = handling;
}
/**
     * Returns the method used for handling numeric features
     *
     * @return the method of handling numeric features
*/
public NumericalHandeling getHandling()
{
return numericHandling;
}
}
| 10,442 | 32.578778 | 139 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/featureselection/ReliefF.java |
package jsat.datatransform.featureselection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.datatransform.RemoveAttributeTransform;
import jsat.exceptions.FailedToFitException;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.distancemetrics.TrainableDistanceMetric;
import jsat.linear.vectorcollection.DefaultVectorCollection;
import jsat.linear.vectorcollection.VectorCollection;
import jsat.utils.FakeExecutor;
import jsat.utils.IndexTable;
import jsat.utils.IntSet;
import jsat.utils.SystemInfo;
import jsat.utils.random.RandomUtil;
/**
 * Provides an implementation of the ReliefF algorithm for computing feature importance.
 * Because JSAT does not support neighbor searching for categorical values, it does not
 * provide weights for categorical variables. <br>
 * Weight values are in the range [-1, 1]. The value is a measure of correlation, so the
 * absolute value of the individual weights gives their relative importance to the
 * others. <br>
* The ReliefF algorithm is meant for classification problems, and is computed in a
* nearest neighbor fashion. <br><br>
* See:<br>Kononenko, I., Simec, E.,&Robnik-Sikonja, M. (1997).
* <i><a href="http://www.springerlink.com/index/W174714344273004.pdf">
* Overcoming the myopia of inductive learning algorithms with RELIEFF</a></i>.
* Applied Intelligence, 7, 39–55.
*
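 * A minimal usage sketch (illustrative only; the training data is assumed to
 * be built elsewhere):
 * <pre>{@code
 * ClassificationDataSet train = ...; //labeled data with numeric features
 * ReliefF relief = new ReliefF(10, 100, 15, new EuclideanDistance());
 * relief.fit(train);
 * train.applyTransform(relief); //keep the 10 highest weighted numeric features
 * }</pre>
 *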
* @author Edward Raff
*/
public class ReliefF extends RemoveAttributeTransform
{
private static final long serialVersionUID = -3336500245613075520L;
private double[] w;
private int featureCount;
private int iterations;
private int neighbors;
private DistanceMetric dm;
private VectorCollection<Vec> vc = new DefaultVectorCollection<>();
/**
* Creates a new ReliefF object to measure the importance of the variables
* with respect to a classification task. Only numeric features will be
     * removed. Categorical features will be ignored and left intact by the
* transformation
*
* @param featureCount the number of features to keep
*/
public ReliefF(int featureCount)
{
this(featureCount, 100, 15, new EuclideanDistance(), new DefaultVectorCollection<Vec>());
}
/**
* Creates a new ReliefF object to measure the importance of the variables with
* respect to a classification task. Only numeric features will be removed.
     * Categorical features will be ignored and left intact by the transformation
*
* @param featureCount the number of features to keep
* @param m the number of learning iterations to perform
* @param n the number of neighbors to measure importance from
* @param dm the distance metric to use
*/
public ReliefF(int featureCount, final int m, final int n, final DistanceMetric dm)
{
this(featureCount, m, n, dm, new DefaultVectorCollection<Vec>());
}
/**
* Creates a new ReliefF object to measure the importance of the variables with
* respect to a classification task. Only numeric features will be removed.
     * Categorical features will be ignored and left intact by the transformation
*
* @param cds the data set to measure numeric variable importance from
* @param featureCount the number of features to keep
* @param m the number of learning iterations to perform
* @param n the number of neighbors to measure importance from
* @param dm the distance metric to use
*/
public ReliefF(final ClassificationDataSet cds, int featureCount, final int m, final int n, final DistanceMetric dm)
{
this(cds, featureCount, m, n, dm, new DefaultVectorCollection<Vec>());
}
/**
* Creates a new ReliefF object to measure the importance of the variables with
* respect to a classification task. Only numeric features will be removed.
     * Categorical features will be ignored and left intact by the transformation
*
* @param cds the data set to measure numeric variable importance from
* @param featureCount the number of features to keep
* @param m the number of learning iterations to perform
* @param n the number of neighbors to measure importance from
* @param dm the distance metric to use
* @param threadPool the source of threads to use for the computation
*/
public ReliefF(final ClassificationDataSet cds, int featureCount, final int m, final int n, final DistanceMetric dm, ExecutorService threadPool)
{
this(cds, featureCount, m, n, dm, new DefaultVectorCollection<Vec>(), threadPool);
}
/**
* Creates a new ReliefF object to measure the importance of the variables with
* respect to a classification task. Only numeric features will be removed.
     * Categorical features will be ignored and left intact by the transformation
*
* @param cds the data set to measure numeric variable importance from
* @param featureCount the number of features to keep
* @param m the number of learning iterations to perform
* @param n the number of neighbors to measure importance from
* @param dm the distance metric to use
* @param vc the vector collection to create accelerating structures for nearest neighbor
*/
public ReliefF(final ClassificationDataSet cds, int featureCount, final int m, final int n, final DistanceMetric dm, VectorCollection<Vec> vc)
{
this(cds, featureCount, m, n, dm, vc, null);
}
/**
* copy constructor
* @param toCopy the object to copy
*/
protected ReliefF(ReliefF toCopy)
{
super(toCopy);
if(toCopy.w != null)
this.w = Arrays.copyOf(toCopy.w, toCopy.w.length);
this.dm = toCopy.dm.clone();
this.featureCount = toCopy.featureCount;
this.iterations = toCopy.iterations;
this.neighbors = toCopy.neighbors;
this.vc = toCopy.vc.clone();
}
/**
* Creates a new ReliefF object to measure the importance of the variables
* with respect to a classification task. Only numeric features will be
     * removed. Categorical features will be ignored and left intact by the
* transformation
*
* @param featureCount the number of features to keep
* @param m the number of learning iterations to perform
* @param n the number of neighbors to measure importance from
* @param dm the distance metric to use
     * @param vc the vector collection used to create accelerating structures for nearest neighbor search
*/
public ReliefF(int featureCount, final int m, final int n, final DistanceMetric dm, VectorCollection<Vec> vc)
{
super();
setFeatureCount(featureCount);
setIterations(m);
setNeighbors(n);
setDistanceMetric(dm);
this.vc = vc;
}
/**
* Creates a new ReliefF object to measure the importance of the variables with
* respect to a classification task. Only numeric features will be removed.
     * Categorical features will be ignored and left intact by the transformation
*
* @param cds the data set to measure numeric variable importance from
* @param featureCount the number of features to keep
* @param m the number of learning iterations to perform
* @param n the number of neighbors to measure importance from
* @param dm the distance metric to use
     * @param vcf the vector collection used to create accelerating structures for nearest neighbor search
* @param threadPool the source of threads to use for the computation
*/
public ReliefF(final ClassificationDataSet cds, int featureCount, final int m, final int n, final DistanceMetric dm, VectorCollection<Vec> vcf, ExecutorService threadPool)
{
this(featureCount, m, n, dm, vcf);
fit(cds, threadPool);
}
@Override
public void fit(DataSet data)
{
fit(data, null);
}
public void fit(DataSet data, ExecutorService threadPool)
{
if(!(data instanceof ClassificationDataSet))
throw new FailedToFitException("RelifF only works with classification datasets, not " + data.getClass().getSimpleName());
final ClassificationDataSet cds = (ClassificationDataSet) data;
this.w = new double[cds.getNumNumericalVars()];
final double[] minVals = new double[w.length];
Arrays.fill(minVals, Double.POSITIVE_INFINITY);
final double[] normalizer = new double[w.length];
Arrays.fill(normalizer, Double.NEGATIVE_INFINITY);
final double[] priors = cds.getPriors();
final List<Vec> allVecs = cds.getDataVectors();
for(Vec v : allVecs)
for(int i = 0; i < v.length(); i++)
{
minVals[i] = Math.min(minVals[i], v.get(i));
normalizer[i] = Math.max(normalizer[i], v.get(i));
}
for(int i = 0; i < normalizer.length; i++)
normalizer[i] -= minVals[i];
final List<VectorCollection< Vec>> classVC = new ArrayList<>(priors.length);
TrainableDistanceMetric.trainIfNeeded(dm, cds, threadPool);
int curStart = 0;
for (int i = 0; i < priors.length; i++)
{
int classCount = cds.classSampleCount(i);
classVC.add(vc.clone());
            classVC.get(i).build(threadPool != null, allVecs.subList(curStart, curStart + classCount), dm);
curStart += classCount;
}
final int m = iterations;
final int n = neighbors;
final int toUse = threadPool == null ? 1 : SystemInfo.LogicalCores;
if(threadPool == null)
threadPool = new FakeExecutor();
final int blockSize = m/toUse;
final CountDownLatch latch = new CountDownLatch(toUse);
for(int id = 0; id < toUse; id++)
{
final int mm;
if(id < m%toUse)
mm = blockSize+1;
else
mm = blockSize;
threadPool.submit(new Runnable()
{
@Override
public void run()
{
double[] wLocal = new double[w.length];
Random rand = RandomUtil.getRandom();
for(int iter = 0; iter < mm; iter++)
{
final int k = rand.nextInt(cds.size());
final Vec x_k = allVecs.get(k);
final int y_k = cds.getDataPointCategory(k);
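                    //ReliefF weight update: for each of the n nearest "hits" (same class as x_k)
                    //subtract diff/(m*n); for each of the n nearest "misses" from a class y != y_k,
                    //add P(y)/(1-P(y_k)) * diff/(m*n), where diff is the range-normalized
                    //per-feature distance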
for (int y = 0; y < priors.length; y++)//# classes = C
{
                        int searchFor = y == y_k ? n + 1 : n;//+1 so we don't search for ourselves
List<? extends VecPaired<Vec, Double>> nNearestC = classVC.get(y).search(x_k, searchFor);
if (searchFor != n)
nNearestC = nNearestC.subList(1, searchFor);//chop off the first value which is ourselves
for (int i = 0; i < w.length; i++)
for (VecPaired<Vec, Double> x_jy : nNearestC)// j loop
{
if (y == y_k)
wLocal[i] -= diff(i, x_k, x_jy.getVector(), normalizer)/(m*n);
else
wLocal[i] += priors[y]/(1-priors[y_k])*diff(i, x_k, x_jy.getVector(), normalizer)/(m*n);
}
}
}
synchronized(w)
{
for(int i = 0; i < w.length; i++)
w[i] += wLocal[i];
}
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(ReliefF.class.getName()).log(Level.SEVERE, null, ex);
}
IndexTable it = new IndexTable(w);
Set<Integer> numericalToRemove = new IntSet(w.length*2);
for(int i = 0; i < w.length-featureCount; i++)
numericalToRemove.add(it.index(i));
setUp(cds, Collections.EMPTY_SET, numericalToRemove);
}
/**
* Returns accesses to the learned weight data. Altering the values will be
* reflected in this original ReliefF object.
* @return access to the raw weight values
*/
public Vec getWeights()
{
return new DenseVector(w);
}
private double diff(int i, Vec xj, Vec xk, double[] normalzer)
{
if(normalzer[i] == 0)
return 0;
return Math.abs(xj.get(i) - xk.get(i))/normalzer[i];
}
@Override
public ReliefF clone()
{
return new ReliefF(this);
}
/**
* Sets the number of features to select for use from the set of all input
* features
*
* @param featureCount the number of features to use
*/
public void setFeatureCount(int featureCount)
{
if (featureCount < 1)
throw new IllegalArgumentException("Number of features to select must be positive, not " + featureCount);
this.featureCount = featureCount;
}
/**
     * Returns the number of features to use
     *
     * @return the number of features to use
*/
public int getFeatureCount()
{
return featureCount;
}
/**
* Sets the number of iterations of the ReliefF algorithm that will be run
*
* @param iterations the number of iterations to run
*/
public void setIterations(int iterations)
{
if (iterations < 1)
throw new IllegalArgumentException("Number of iterations must be positive, not " + iterations);
this.iterations = iterations;
}
/**
* Returns the number of iterations to use
*
* @return the number of iterations to use
*/
public int getIterations()
{
return iterations;
}
/**
* Sets the number of neighbors to use to infer feature importance from
*
* @param neighbors the number of neighbors to use
*/
public void setNeighbors(int neighbors)
{
if (neighbors < 1)
throw new IllegalArgumentException("Number of neighbors must be positive, not " + neighbors);
this.neighbors = neighbors;
}
/**
* Returns the number of neighbors that will be used at each step of the
* algorithm.
*
* @return the number of neighbors that will be used
*/
public int getNeighbors()
{
return neighbors;
}
/**
* Sets the distance metric to infer the feature importance with
*
* @param dm the distance metric to use
*/
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
/**
* Returns the distance metric to use
*
* @return the distance metric to use
*/
public DistanceMetric getDistanceMetric()
{
return dm;
}
}
| 15,662 | 36.028369 | 175 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/featureselection/SBS.java | package jsat.datatransform.featureselection;
import java.util.*;
import jsat.DataSet;
import jsat.classifiers.*;
import jsat.datatransform.*;
import static jsat.datatransform.featureselection.SFS.addFeature;
import static jsat.datatransform.featureselection.SFS.removeFeature;
import jsat.regression.RegressionDataSet;
import jsat.regression.Regressor;
import jsat.utils.IntList;
import jsat.utils.IntSet;
import jsat.utils.ListUtils;
import jsat.utils.random.RandomUtil;
/**
* Sequential Backward Selection (SBS) is a greedy method of selecting a subset
* of features to use for prediction. It starts from the set of all features and
* attempts to remove the least informative feature from the set at each
* iteration
*
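 * <br><br>
 * A minimal usage sketch (illustrative only; the wrapped classifier and data
 * set are assumptions):
 * <pre>{@code
 * ClassificationDataSet train = ...; //labeled data
 * Classifier base = ...; //any classifier to evaluate feature subsets with
 * SBS sbs = new SBS(5, 20, base, 0.05);
 * sbs.fit(train);
 * train.applyTransform(sbs);
 * }</pre>
 *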
* @author Edward Raff
*/
public class SBS extends RemoveAttributeTransform
{
private static final long serialVersionUID = -2516121100148559742L;
private double maxDecrease;
private int folds;
private int minFeatures, maxFeatures;
private Object evaluator;
/**
* Copy constructor
* @param toClone the version to copy
*/
private SBS(SBS toClone)
{
super(toClone);
this.maxDecrease = toClone.maxDecrease;
this.folds = toClone.folds;
this.minFeatures = toClone.minFeatures;
this.maxFeatures = toClone.maxFeatures;
this.evaluator = toClone.evaluator;
}
/**
* Performs SBS feature selection for a classification problem
*
* @param minFeatures the minimum number of features to find
* @param maxFeatures the maximum number of features to find
* @param evaluater the classifier to use in determining accuracy given a
* feature subset
     * @param maxDecrease the maximum tolerable decrease in accuracy when a
     * feature is removed
*/
public SBS(int minFeatures, int maxFeatures, Classifier evaluater, double maxDecrease)
{
this(minFeatures, maxFeatures, evaluater, 3, maxDecrease);
}
private SBS(int minFeatures, int maxFeatures, Object evaluater, int folds, double maxDecrease)
{
super();
setMaxDecrease(maxDecrease);
setMinFeatures(minFeatures);
setMaxFeatures(maxFeatures);
setEvaluator(evaluater);
setFolds(folds);
}
/**
* Performs SBS feature selection for a classification problem
*
* @param minFeatures the minimum number of features to find
* @param maxFeatures the maximum number of features to find
* @param cds the data set to perform feature selection on
* @param evaluater the classifier to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
     * @param maxDecrease the maximum tolerable decrease in accuracy when a
     * feature is removed
*/
public SBS(int minFeatures, int maxFeatures, ClassificationDataSet cds, Classifier evaluater, int folds, double maxDecrease)
{
this(minFeatures, maxFeatures, evaluater, folds, maxDecrease);
search(cds, evaluater, minFeatures, maxFeatures, folds);
}
/**
* Performs SBS feature selection for a regression problem
*
* @param minFeatures the minimum number of features to find
* @param maxFeatures the maximum number of features to find
* @param evaluater the regressor to use in determining accuracy given a
* feature subset
* @param maxDecrease the maximum tolerable increase in the error rate when
* a feature is removed
*/
public SBS(int minFeatures, int maxFeatures, Regressor evaluater, double maxDecrease)
{
this(minFeatures, maxFeatures, evaluater, 3, maxDecrease);
}
/**
* Performs SBS feature selection for a regression problem
*
* @param minFeatures the minimum number of features to find
* @param maxFeatures the maximum number of features to find
* @param rds the data set to perform feature selection on
* @param evaluater the regressor to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
* @param maxDecrease the maximum tolerable increase in the error rate when
* a feature is removed
*/
public SBS(int minFeatures, int maxFeatures, RegressionDataSet rds, Regressor evaluater, int folds, double maxDecrease)
{
this(minFeatures, maxFeatures, evaluater, folds, maxDecrease);
search(rds, evaluater, minFeatures, maxFeatures, folds);
}
@Override
public void fit(DataSet data)
{
search(data, evaluator, minFeatures, maxFeatures, folds);
}
private void search(DataSet dataSet, Object learner, int minFeatures, int maxFeatures, int folds)
{
Random rand = RandomUtil.getRandom();
int nF = dataSet.getNumFeatures();
int nCat = dataSet.getNumCategoricalVars();
Set<Integer> available = new IntSet();
ListUtils.addRange(available, 0, nF, 1);
Set<Integer> catSelected = new IntSet(dataSet.getNumCategoricalVars());
Set<Integer> numSelected = new IntSet(dataSet.getNumNumericalVars());
Set<Integer> catToRemove = new IntSet(dataSet.getNumCategoricalVars());
Set<Integer> numToRemove = new IntSet(dataSet.getNumNumericalVars());
        //Start with all selected, and prune them out
ListUtils.addRange(catSelected, 0, nCat, 1);
ListUtils.addRange(numSelected, 0, nF-nCat, 1);
double[] bestScore = new double[]{Double.POSITIVE_INFINITY};
while(catSelected.size() + numSelected.size() > minFeatures)
{
if(SBSRemoveFeature(available, dataSet, catToRemove, numToRemove,
catSelected, numSelected, learner, folds, rand,
maxFeatures, bestScore, maxDecrease) < 0)
break;
}
int pos = 0;
catIndexMap = new int[catSelected.size()];
for(int i : catSelected)
catIndexMap[pos++] = i;
Arrays.sort(catIndexMap);
pos = 0;
numIndexMap = new int[numSelected.size()];
for(int i : numSelected)
numIndexMap[pos++] = i;
Arrays.sort(numIndexMap);
}
@Override
public SBS clone()
{
return new SBS(this);
}
/**
* Returns a copy of the set of categorical features selected by the search
* algorithm
*
* @return the set of categorical features to use
*/
public Set<Integer> getSelectedCategorical()
{
return new IntSet(IntList.view(catIndexMap, catIndexMap.length));
}
/**
* Returns a copy of the set of numerical features selected by the search
* algorithm.
*
* @return the set of numeric features to use
*/
public Set<Integer> getSelectedNumerical()
{
return new IntSet(IntList.view(numIndexMap, numIndexMap.length));
}
/**
* Attempts to remove one feature from the list while maintaining its
* accuracy
*
* @param available the set of available features from [0, n) to consider
* for removal
* @param dataSet the original data set to perform feature selection from
* @param catToRemove the current set of categorical features to remove
* @param numToRemove the current set of numerical features to remove
* @param catSelecteed the current set of categorical features we are keeping
* @param numSelected the current set of numerical features we are keeping
* @param evaluater the classifier or regressor to perform evaluations with
* @param folds the number of cross validation folds to determine performance
* @param rand the source of randomness
* @param maxFeatures the maximum allowable number of features
* @param PbestScore an array to behave as a pointer to the best score seen
* so far
* @param maxDecrease the maximum allowable decrease in accuracy from the
* best accuracy we see
* @return the feature that was selected to be removed, or -1 if none were
* removed
*/
protected static int SBSRemoveFeature(Set<Integer> available, DataSet dataSet,
Set<Integer> catToRemove, Set<Integer> numToRemove,
Set<Integer> catSelecteed, Set<Integer> numSelected,
Object evaluater, int folds, Random rand, int maxFeatures,
double[] PbestScore, double maxDecrease)
{
int curBest = -1;
int nCat = dataSet.getNumCategoricalVars();
double curBestScore = Double.POSITIVE_INFINITY;
for(int feature : available)
{
DataSet workOn = dataSet.shallowClone();
addFeature(feature, nCat, catToRemove, numToRemove);
RemoveAttributeTransform remove = new RemoveAttributeTransform(workOn, catToRemove, numToRemove);
workOn.applyTransform(remove);
double score = SFS.getScore(workOn, evaluater, folds, rand);
if(score < curBestScore)
{
curBestScore = score;
curBest = feature;
}
removeFeature(feature, nCat, catToRemove, numToRemove);
}
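        //Accept the removal if we still have more features than allowed, or if the
        //best candidate's error did not grow by more than maxDecrease over the best
        //score seen so far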
if (catSelecteed.size() + numSelected.size() > maxFeatures
|| PbestScore[0] - curBestScore > -maxDecrease)
{
PbestScore[0] = curBestScore;
removeFeature(curBest, nCat, catSelecteed, numSelected);
addFeature(curBest, nCat, catToRemove, numToRemove);
available.remove(curBest);
return curBest;
}
else
            return -1; //No possible improvement & we've got enough
}
/**
* Sets the maximum allowable decrease in accuracy (increase in error) from
* the previous set of features to the new current set.
*
* @param maxDecrease the maximum allowable decrease in the accuracy from
* removing a feature
*/
public void setMaxDecrease(double maxDecrease)
{
if (maxDecrease < 0)
throw new IllegalArgumentException("Decarese must be a positive value, not " + maxDecrease);
this.maxDecrease = maxDecrease;
}
/**
* Returns the maximum allowable decrease in accuracy from one set of
* features to the next
*
* @return the maximum allowable decrease in accuracy from one set of
* features to the next
*/
public double getMaxDecrease()
{
return maxDecrease;
}
/**
* Sets the minimum number of features that must be selected
*
* @param minFeatures the minimum number of features to learn
*/
public void setMinFeatures(int minFeatures)
{
this.minFeatures = minFeatures;
}
/**
* Returns the minimum number of features to find
*
* @return the minimum number of features to find
*/
public int getMinFeatures()
{
return minFeatures;
}
/**
* Sets the maximum number of features that must be selected
*
* @param maxFeatures the maximum number of features to find
*/
public void setMaxFeatures(int maxFeatures)
{
this.maxFeatures = maxFeatures;
}
/**
* Returns the maximum number of features to find
*
* @return the maximum number of features to find
*/
public int getMaxFeatures()
{
return maxFeatures;
}
/**
* Sets the number of folds to use for cross validation when estimating the error rate
* @param folds the number of folds to use for cross validation when estimating the error rate
*/
public void setFolds(int folds)
{
if(folds <= 0 )
throw new IllegalArgumentException("Number of CV folds must be positive, not " + folds);
this.folds = folds;
}
/**
*
* @return the number of folds to use for cross validation when estimating the error rate
*/
public int getFolds()
{
return folds;
}
private void setEvaluator(Object evaluator)
{
this.evaluator = evaluator;
}
}
| 12,338 | 33.757746 | 128 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/featureselection/SFS.java | package jsat.datatransform.featureselection;
import java.util.*;
import jsat.DataSet;
import jsat.classifiers.*;
import jsat.datatransform.*;
import jsat.regression.*;
import jsat.utils.IntSet;
import jsat.utils.ListUtils;
import jsat.utils.random.RandomUtil;
/**
* Sequential Forward Selection (SFS) is a greedy method of selecting a subset
* of features to use for prediction. It starts from the set of no features and
* attempts to add the next best feature to the set at each iteration.
*
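 * <br><br>
 * A minimal usage sketch (illustrative only; the wrapped regressor and data
 * set are assumptions):
 * <pre>{@code
 * RegressionDataSet train = ...; //data with a numeric target
 * Regressor base = ...; //any regressor to evaluate feature subsets with
 * SFS sfs = new SFS(5, 20, base, 0.01);
 * sfs.fit(train);
 * train.applyTransform(sfs);
 * }</pre>
 *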
* @author Edward Raff
*/
public class SFS implements DataTransform
{
private static final long serialVersionUID = 140187978708131002L;
private RemoveAttributeTransform finalTransform;
private Set<Integer> catSelected;
private Set<Integer> numSelected;
private double maxIncrease;
private Classifier classifier;
private Regressor regressor;
private int minFeatures, maxFeatures;
private int folds;
private Object evaluator;
/**
* Copy constructor
* @param toClone the SFS to copy
*/
private SFS(SFS toClone)
{
if(toClone.catSelected != null)
{
this.finalTransform = toClone.finalTransform.clone();
this.catSelected = new IntSet(toClone.catSelected);
this.numSelected = new IntSet(toClone.numSelected);
}
this.maxIncrease = toClone.maxIncrease;
this.folds = toClone.folds;
this.minFeatures = toClone.minFeatures;
this.maxFeatures = toClone.maxFeatures;
this.evaluator = toClone.evaluator;
if (toClone.classifier != null)
this.classifier = toClone.classifier.clone();
if (toClone.regressor != null)
this.regressor = toClone.regressor.clone();
}
/**
* Performs SFS feature selection for a classification problem
*
* @param minFeatures the minimum number of features to find
* @param maxFeatures the maximum number of features to find
* @param evaluater the classifier to use in determining accuracy given a
* feature subset
* @param maxIncrease the maximum tolerable increase in error when a feature
* is added
*/
public SFS(int minFeatures, int maxFeatures, Classifier evaluater, double maxIncrease)
{
this(minFeatures, maxFeatures, evaluater.clone(), 3, maxIncrease);
}
/**
* Performs SFS feature selection for a classification problem
*
* @param minFeatures the minimum number of features to find
* @param maxFeatures the maximum number of features to find
* @param dataSet the data set to perform feature selection on
* @param evaluater the classifier to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
* @param maxIncrease the maximum tolerable increase in error when a feature
* is added
*/
public SFS(int minFeatures, int maxFeatures, ClassificationDataSet dataSet, Classifier evaluater, int folds, double maxIncrease)
{
this(minFeatures, maxFeatures, evaluater.clone(), folds, maxIncrease);
search(minFeatures, maxFeatures, dataSet, folds);
}
/**
* Creates SFS feature selection for a regression problem
*
* @param minFeatures the minimum number of features to find
* @param maxFeatures the maximum number of features to find
* @param regressor the regressor to use in determining accuracy given a
* feature subset
* @param maxIncrease the maximum tolerable increase in error when a feature
* is added
*/
public SFS(int minFeatures, int maxFeatures, Regressor regressor, double maxIncrease)
{
this(minFeatures, maxFeatures, regressor.clone(), 3, maxIncrease);
}
/**
* Performs SFS feature selection for a regression problem
*
* @param minFeatures the minimum number of features to find
* @param maxFeatures the maximum number of features to find
* @param dataSet the data set to perform feature selection on
* @param regressor the regressor to use in determining accuracy given a
* feature subset
* @param folds the number of cross validation folds to use in selection
* @param maxIncrease the maximum tolerable increase in error when a feature
* is added
*/
public SFS(int minFeatures, int maxFeatures, RegressionDataSet dataSet, Regressor regressor, int folds, double maxIncrease)
{
this(minFeatures, maxFeatures, regressor.clone(), folds, maxIncrease);
search(minFeatures, maxFeatures, dataSet, folds);
}
private SFS(int minFeatures, int maxFeatures, Object evaluator, int folds, double maxIncrease)
{
setMinFeatures(minFeatures);
setMaxFeatures(maxFeatures);
setFolds(folds);
setMaxIncrease(maxIncrease);
setEvaluator(evaluator);
}
@Override
public void fit(DataSet data)
{
        search(minFeatures, maxFeatures, data, folds);
}
private void search(int minFeatures, int maxFeatures, DataSet dataSet, int folds)
{
Random rand = RandomUtil.getRandom();
int nF = dataSet.getNumFeatures();
int nCat = dataSet.getNumCategoricalVars();
Set<Integer> available = new IntSet();
ListUtils.addRange(available, 0, nF, 1);
catSelected = new IntSet(dataSet.getNumCategoricalVars());
numSelected = new IntSet(dataSet.getNumNumericalVars());
Set<Integer> catToRemove = new IntSet(dataSet.getNumCategoricalVars());
Set<Integer> numToRemove = new IntSet(dataSet.getNumNumericalVars());
ListUtils.addRange(catToRemove, 0, nCat, 1);
ListUtils.addRange(numToRemove, 0, nF-nCat, 1);
double[] bestScore = new double[]{Double.POSITIVE_INFINITY};
Object learner = regressor;
if (dataSet instanceof ClassificationDataSet)
learner = classifier;
while (catSelected.size() + numSelected.size() < maxFeatures)
{
if (SFSSelectFeature(available, dataSet,
catToRemove, numToRemove, catSelected, numSelected,
learner, folds, rand, bestScore, minFeatures) < 0)
break;
}
this.finalTransform = new RemoveAttributeTransform(dataSet, catToRemove, numToRemove);
}
/**
*
* @param curBest the value of curBest
* @param nCat the value of nCat
* @param catF the value of catF
* @param numF the value of numF
*/
static protected void addFeature(int curBest, int nCat, Set<Integer> catF, Set<Integer> numF)
{
if(curBest >= nCat)
numF.add(curBest-nCat);
else
catF.add(curBest);
}
/**
*
* @param feature the value of feature
* @param nCat the value of nCat
* @param catF the value of catF
* @param numF the value of numF
*/
static protected void removeFeature(int feature, int nCat, Set<Integer> catF, Set<Integer> numF)
{
if(feature >= nCat)
numF.remove(feature-nCat);
else
catF.remove(feature);
}
@Override
public DataPoint transform(DataPoint dp)
{
return finalTransform.transform(dp);
}
@Override
public SFS clone()
{
return new SFS(this);
}
/**
* Returns a copy of the set of categorical features selected by the search
* algorithm
*
* @return the set of categorical features to use
*/
public Set<Integer> getSelectedCategorical()
{
return new IntSet(catSelected);
}
/**
* Returns a copy of the set of numerical features selected by the search
* algorithm.
*
* @return the set of numeric features to use
*/
public Set<Integer> getSelectedNumerical()
{
return new IntSet(numSelected);
}
/**
* Attempts to add one feature to the list of features while increasing or
* maintaining the current accuracy
*
* @param available the set of available features from [0, n) to consider
* for adding
* @param dataSet the original data set to perform feature selection from
* @param catToRemove the current set of categorical features to remove
* @param numToRemove the current set of numerical features to remove
* @param catSelecteed the current set of categorical features we are keeping
* @param numSelected the current set of numerical features we are keeping
* @param evaluater the classifier or regressor to perform evaluations with
* @param folds the number of cross validation folds to determine performance
* @param rand the source of randomness
* @param PbestScore an array to behave as a pointer to the best score seen
* so far
* @param minFeatures the minimum number of features needed
* @return the feature that was selected to add, or -1 if none were added.
*/
static protected int SFSSelectFeature(Set<Integer> available,
DataSet dataSet, Set<Integer> catToRemove, Set<Integer> numToRemove,
Set<Integer> catSelecteed, Set<Integer> numSelected,
Object evaluater, int folds, Random rand, double[] PbestScore,
int minFeatures)
{
int nCat = dataSet.getNumCategoricalVars();
int curBest = -1;
double curBestScore = Double.POSITIVE_INFINITY;
for(int feature : available)
{
removeFeature(feature, nCat, catToRemove, numToRemove);
DataSet workOn = dataSet.shallowClone();
RemoveAttributeTransform remove = new RemoveAttributeTransform(workOn, catToRemove, numToRemove);
workOn.applyTransform(remove);
double score = getScore(workOn, evaluater, folds, rand);
if(score < curBestScore)
{
curBestScore = score;
curBest = feature;
}
addFeature(feature, nCat, catToRemove, numToRemove);
}
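        //Stop if we already have a perfect (zero) error and enough features. Otherwise
        //accept the best candidate if it improves on the best score so far, if we still
        //need more features, or if it is within a small tolerance of the best score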
if(curBestScore <= 1e-14 && PbestScore[0] <= 1e-14
&& catSelecteed.size() + numSelected.size() >= minFeatures )
return -1;
if (curBestScore < PbestScore[0]
|| catSelecteed.size() + numSelected.size() < minFeatures
|| Math.abs(PbestScore[0]-curBestScore) < 1e-3)
{
PbestScore[0] = curBestScore;
addFeature(curBest, nCat, catSelecteed, numSelected);
removeFeature(curBest, nCat, catToRemove, numToRemove);
available.remove(curBest);
return curBest;
}
else
            return -1; //No possible improvement & we've got enough
}
/**
     * The score function for a data set and a learner, estimated by cross
     * validation
*
* @param workOn the transformed data set to test from with cross validation
* @param evaluater the learning algorithm to use
* @param folds the number of cross validation folds to perform
* @param rand the source of randomness
* @return the score value in terms of cross validated error
*/
protected static double getScore(DataSet workOn, Object evaluater, int folds, Random rand)
{
if(workOn instanceof ClassificationDataSet)
{
ClassificationModelEvaluation cme =
new ClassificationModelEvaluation((Classifier)evaluater,
(ClassificationDataSet)workOn);
cme.evaluateCrossValidation(folds, rand);
return cme.getErrorRate();
}
else if(workOn instanceof RegressionDataSet)
{
RegressionModelEvaluation rme =
new RegressionModelEvaluation((Regressor)evaluater,
(RegressionDataSet)workOn);
rme.evaluateCrossValidation(folds, rand);
return rme.getMeanError();
}
return Double.POSITIVE_INFINITY;
}
/**
     * Sets the maximum tolerable increase in error when a feature is added
     *
     * @param maxIncrease the maximum tolerable increase in error when a
     * feature is added
*/
public void setMaxIncrease(double maxIncrease)
{
if (maxIncrease < 0)
throw new IllegalArgumentException("Decarese must be a positive value, not " + maxIncrease);
this.maxIncrease = maxIncrease;
}
/**
*
     * @return the maximum tolerable increase in error
* when a feature is added
*/
public double getMaxIncrease()
{
return maxIncrease;
}
/**
* Sets the minimum number of features that must be selected
*
* @param minFeatures the minimum number of features to learn
*/
public void setMinFeatures(int minFeatures)
{
this.minFeatures = minFeatures;
}
/**
* Returns the minimum number of features to find
*
* @return the minimum number of features to find
*/
public int getMinFeatures()
{
return minFeatures;
}
/**
* Sets the maximum number of features that must be selected
*
* @param maxFeatures the maximum number of features to find
*/
public void setMaxFeatures(int maxFeatures)
{
this.maxFeatures = maxFeatures;
}
/**
* Returns the maximum number of features to find
*
* @return the maximum number of features to find
*/
public int getMaxFeatures()
{
return maxFeatures;
}
/**
* Sets the number of folds to use for cross validation when estimating the error rate
* @param folds the number of folds to use for cross validation when estimating the error rate
*/
public void setFolds(int folds)
{
if(folds <= 0 )
throw new IllegalArgumentException("Number of CV folds must be positive, not " + folds);
this.folds = folds;
}
/**
*
* @return the number of folds to use for cross validation when estimating the error rate
*/
public int getFolds()
{
return folds;
}
private void setEvaluator(Object evaluator)
{
this.evaluator = evaluator;
if(evaluator instanceof Classifier)
this.classifier = (Classifier) evaluator;
if(evaluator instanceof Regressor)
this.regressor = (Regressor) evaluator;
}
}
| 14,643 | 33.456471 | 132 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/kernel/KernelPCA.java | package jsat.datatransform.kernel;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.datatransform.DataTransformBase;
import jsat.datatransform.PCA;
import jsat.datatransform.kernel.Nystrom.SamplingMethod;
import jsat.distributions.Distribution;
import jsat.distributions.discrete.UniformDiscrete;
import jsat.distributions.kernels.KernelTrick;
import jsat.distributions.kernels.RBFKernel;
import jsat.linear.DenseMatrix;
import jsat.linear.DenseVector;
import jsat.linear.EigenValueDecomposition;
import jsat.linear.Matrix;
import jsat.linear.RowColumnOps;
import jsat.linear.Vec;
import jsat.parameters.Parameter.ParameterHolder;
import jsat.utils.random.RandomUtil;
/**
* A kernelized implementation of {@link PCA}. Because this works in a different
* feature space, it will do its own centering in the kernel space.
* <br><br>
* KernelPCA is expensive to compute at O(n<sup>3</sup>) work, where <i>n</i> is
* the number of data points. For this reason, sampling from {@link Nystrom} is
* used to reduce the data set to a reasonable approximation.
* <br><br>
* See: Schölkopf, B., Smola, A.,&Müller, K.-R. (1998). <i>Nonlinear Component
* Analysis as a Kernel Eigenvalue Problem</i>. Neural Computation, 10(5),
* 1299–1319. doi:10.1162/089976698300017467
*
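 * A minimal usage sketch (illustrative values; the data set is an assumption):
 * <pre>{@code
 * DataSet train = ...; //data with numeric features
 * KernelPCA kpca = new KernelPCA(new RBFKernel(), 50, 500, Nystrom.SamplingMethod.KMEANS);
 * kpca.fit(train);
 * train.applyTransform(kpca); //project onto the top 50 kernel principal components
 * }</pre>
 *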
* @author Edward Raff
* @see Nystrom.SamplingMethod
*/
public class KernelPCA extends DataTransformBase
{
private static final long serialVersionUID = 5676602024560381023L;
/**
* The dimension to project down to
*/
private int dimensions;
@ParameterHolder
private KernelTrick k;
private int basisSize;
private Nystrom.SamplingMethod samplingMethod;
private double[] eigenVals;
/**
* The matrix of transformed eigen vectors
*/
private Matrix eigenVecs;
/**
* The vecs used for the transform
*/
private Vec[] vecs;
//row / colum info for centering in the feature space
private double[] rowAvg;
private double allAvg;
/**
* Creates a new Kernel PCA transform object using the
* {@link RBFKernel RBF Kernel} and 100 dimensions
*
*/
public KernelPCA()
{
this(100);
}
/**
* Creates a new Kernel PCA transform object using the
* {@link RBFKernel RBF Kernel}
*
* @param dimensions the number of dimensions to project down to. Must be
     * less than the basis size
*/
public KernelPCA(int dimensions)
{
this(new RBFKernel(), dimensions);
}
/**
* Creates a new Kernel PCA transform object
*
* @param k the kernel trick to use
* @param dimensions the number of dimensions to project down to. Must be
     * less than the basis size
*/
public KernelPCA(KernelTrick k, int dimensions)
{
this(k, dimensions, 1000, SamplingMethod.UNIFORM);
}
/**
* Creates a new Kernel PCA transform object
* @param k the kernel trick to use
* @param dimensions the number of dimensions to project down to. Must be
     * less than the basis size
* @param basisSize the number of points from the data set to select. If
* larger than the number of data points in the data set, the whole data set
* will be used.
* @param samplingMethod the sampling method to select the basis vectors
*/
public KernelPCA(KernelTrick k, int dimensions, int basisSize, Nystrom.SamplingMethod samplingMethod)
{
setDimensions(dimensions);
setKernel(k);
setBasisSize(basisSize);
setBasisSamplingMethod(samplingMethod);
}
/**
* Creates a new Kernel PCA transform object
* @param k the kernel trick to use
* @param ds the data set to form the data transform from
* @param dimensions the number of dimensions to project down to. Must be
     * less than the basis size
* @param basisSize the number of points from the data set to select. If
* larger than the number of data points in the data set, the whole data set
* will be used.
* @param samplingMethod the sampling method to select the basis vectors
*/
public KernelPCA(KernelTrick k, DataSet ds, int dimensions, int basisSize, Nystrom.SamplingMethod samplingMethod)
{
this(k, dimensions, basisSize, samplingMethod);
fit(ds);
}
@Override
public void fit(DataSet ds)
{
if(ds.size() <= basisSize)
{
vecs = new Vec[ds.size()];
for(int i = 0; i < vecs.length; i++)
vecs[i] = ds.getDataPoint(i).getNumericalValues();
}
else
{
int i = 0;
List<Vec> sample = Nystrom.sampleBasisVectors(k, ds, ds.getDataVectors(), samplingMethod, basisSize, false, RandomUtil.getRandom());
vecs = new Vec[sample.size()];
for(Vec v : sample)
vecs[i++] = v;
}
Matrix K = new DenseMatrix(vecs.length, vecs.length);
//Info used to compute centered Kernel matrix
rowAvg = new double[K.rows()];
allAvg = 0;
for(int i = 0; i < K.rows(); i++)
{
Vec x_i = vecs[i];
for(int j = i; j < K.cols(); j++)
{
double K_ij = k.eval(x_i, vecs[j]);
K.set(i, j, K_ij);
K.set(j, i, K_ij);//K = K'
}
}
        //Get row / col info to perform centering. Since K is symmetric, the row
//and col info are the same
for(int i = 0; i < K.rows(); i++)
for(int j = 0; j < K.cols(); j++)
rowAvg[i] += K.get(i, j);
for (int i = 0; i < K.rows(); i++)
{
allAvg += rowAvg[i];
rowAvg[i] /= K.rows();
}
allAvg /= (K.rows()*K.cols());
        //Centered version of the matrix
//K_c(i, j) = K_ij - sum_z K_zj / m - sum_z K_iz / m + sum_{z,y} K_zy / m^2
for(int i = 0; i < K.rows(); i++)
for(int j = 0; j < K.cols(); j++)
K.set(i, j, K.get(i, j) - rowAvg[i] - rowAvg[j] + allAvg);
EigenValueDecomposition evd = new EigenValueDecomposition(K);
evd.sortByEigenValue(new Comparator<Double>()
{
@Override
public int compare(Double o1, Double o2)
{
return -Double.compare(o1, o2);
}
});
eigenVals = evd.getRealEigenvalues();
eigenVecs = evd.getV();
for(int j = 0; j < eigenVals.length; j++)//TODO row order would be more cache friendly
RowColumnOps.divCol(eigenVecs, j, Math.sqrt(eigenVals[j]));
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
protected KernelPCA(KernelPCA toCopy)
{
this.dimensions = toCopy.dimensions;
this.k = toCopy.k.clone();
this.basisSize = toCopy.basisSize;
this.samplingMethod = toCopy.samplingMethod;
if(toCopy.eigenVals != null)
this.eigenVals = Arrays.copyOf(toCopy.eigenVals, toCopy.eigenVals.length);
if(toCopy.eigenVecs != null)
this.eigenVecs = toCopy.eigenVecs.clone();
if(toCopy.vecs != null)
{
this.vecs = new Vec[toCopy.vecs.length];
for(int i = 0; i < vecs.length; i++)
this.vecs[i] = toCopy.vecs[i].clone();
this.rowAvg = Arrays.copyOf(toCopy.rowAvg, toCopy.rowAvg.length);
}
this.allAvg = toCopy.allAvg;
}
@Override
public DataPoint transform(DataPoint dp)
{
Vec oldVec = dp.getNumericalValues();
Vec newVec = new DenseVector(dimensions);
//TODO put this in a thread local object? Or hope JVM puts a large array on the stack?
final double[] kEvals = new double[vecs.length];
double tAvg = 0;
for (int j = 0; j < vecs.length; j++)
tAvg += (kEvals[j] = k.eval(vecs[j], oldVec));
tAvg /= vecs.length;
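        //Project onto each principal component using double-centered kernel evaluations:
        //K_c(t, x_j) = k(t, x_j) - mean_j k(t, x_j) - mean_z k(x_j, x_z) + overall mean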
for (int i = 0; i < dimensions; i++)
{
double val = 0;
for (int j = 0; j < vecs.length; j++)
                val += eigenVecs.get(j, i) * (kEvals[j] - tAvg - rowAvg[j] + allAvg);
newVec.set(i, val);
}
return new DataPoint(newVec, dp.getCategoricalValues(), dp.getCategoricalData());
}
@Override
public KernelPCA clone()
{
return new KernelPCA(this);
}
/**
*
* @param k the kernel trick to use
*/
public void setKernel(KernelTrick k)
{
this.k = k;
}
/**
*
* @return the kernel trick to use
*/
public KernelTrick getKernel()
{
return k;
}
/**
* Sets the basis size for the Kernel PCA to be learned from. Increasing the
     * basis size increases the accuracy of the transform, but increases the
     * training time at a cubic rate.
*
* @param basisSize the number of basis vectors to build Kernel PCA from
*/
public void setBasisSize(int basisSize)
{
if (basisSize < 1)
throw new IllegalArgumentException("The basis size must be positive, not " + basisSize);
this.basisSize = basisSize;
}
/**
* Returns the number of basis vectors to use
*
* @return the number of basis vectors to use
*/
public int getBasisSize()
{
return basisSize;
}
/**
* Sets the dimension of the new feature space, which is the number of
* principal components to select from the kernelized feature space.
*
     * @param dimensions the number of dimensions to project down to
*/
public void setDimensions(int dimensions)
{
if (dimensions < 1)
throw new IllegalArgumentException("The number of dimensions must be positive, not " + dimensions);
this.dimensions = dimensions;
}
/**
     * Returns the number of dimensions to project down to
     *
     * @return the number of dimensions to project down to
*/
public int getDimensions()
{
return dimensions;
}
/**
* Sets the method of selecting the basis vectors
*
* @param method the method of selecting the basis vectors
*/
public void setBasisSamplingMethod(SamplingMethod method)
{
this.samplingMethod = method;
}
/**
* Returns the method of selecting the basis vectors
*
* @return the method of selecting the basis vectors
*/
public SamplingMethod getBasisSamplingMethod()
{
return samplingMethod;
}
public static Distribution guessDimensions(DataSet d)
{
return new UniformDiscrete(20, 200);
}
}
| 10,909 | 29.560224 | 144 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/kernel/Nystrom.java | package jsat.datatransform.kernel;
import java.util.*;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.clustering.kmeans.HamerlyKMeans;
import jsat.clustering.SeedSelectionMethods;
import jsat.datatransform.*;
import jsat.distributions.kernels.KernelTrick;
import jsat.distributions.kernels.RBFKernel;
import jsat.linear.*;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.parameters.Parameter.ParameterHolder;
import jsat.utils.DoubleList;
import jsat.utils.IntSet;
import jsat.utils.random.RandomUtil;
/**
* An implementation of the Nystrom approximation for any Kernel Trick. The full
* rank kernel is approximated by a basis set of a subset of the data points
* that make up the original data set. Instead of explicitly forming the
* approximately decomposed matrix, this transform projects the original numeric
* features of a data set into a new feature space where the dot product in the
* linear space approximates the dot product in the given kernel space.
* <br><br>
* See: <br>
* <ul>
* <li>Williams, C.,&Seeger, M. (2001). <i>Using the Nyström Method to Speed
* Up Kernel Machines</i>. Advances in Neural Information Processing Systems 13
* (pp. 682–688). MIT Press. Retrieved from
* <a href="http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.18.7519">
* here</a></li>
* <li>Yang, T., Li, Y.-F., Mahdavi, M., Jin, R.,&Zhou, Z.-H. (2012). <i>
* Nystrom Method vs Random Fourier Features A Theoretical and Empirical
* Comparison</i>. Advances in Neural Information Processing Systems
* (pp. 485–493). Retrieved from
* <a href="http://books.nips.cc/papers/files/nips25/NIPS2012_0248.txt">here</a>
* </li>
* <li>Kumar, S., Mohri, M.,&Talwalkar, A. (2012). <i>Sampling methods for the
* Nyström method</i>. The Journal of Machine Learning Research, 5, 981–1006.
* Retrieved from <a href="http://dl.acm.org/citation.cfm?id=2343678">here</a>
* </li>
* </ul>
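 * <br><br>
 * A minimal usage sketch (illustrative values; the data set is an assumption):
 * <pre>{@code
 * DataSet train = ...; //data with numeric features
 * Nystrom nystrom = new Nystrom(new RBFKernel(), 500);
 * nystrom.fit(train);
 * train.applyTransform(nystrom); //dot products now approximate the RBF kernel
 * }</pre>
 *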
* @author Edward Raff
*/
public class Nystrom extends DataTransformBase
{
private static final long serialVersionUID = -3227844260130709773L;
private double ridge;
@ParameterHolder
private KernelTrick k;
private int dimension;
private SamplingMethod method;
int basisSize;
private boolean sampleWithReplacment;
private List<Vec> basisVecs;
private List<Double> accelCache;
private Matrix transform;
/**
* Different sample methods may be used to select a better and more
* representative set of vectors to form the basis vectors at increased
* cost, where {@code n} is the number of data points in the full data set
* and {@code b} is the number of basis vectors to obtain.
*/
public enum SamplingMethod
{
/**
* Selects the basis vectors by uniform sampling, takes O(b) time
*/
UNIFORM,
/**
* Selects the basis vectors by a weighted probability of the kernel
* value of k(x<sub>i</sub>, x<sub>i</sub>) for each <i>i</i>. If a
* kernel returns 1 for all k(x<sub>i</sub>, x<sub>i</sub>), then this
         * reduces to uniform sampling. Takes O(n) time
*/
DIAGONAL,
/**
* Selects the basis vectors by the weighted probability of the column
* norms of the gram matrix for each vector. Takes O(n<sup>2</sup>) time
*/
NORM,
/**
* Selects the basis vectors as the means of a k-means clustering. Takes
* the time needed to perform k-means
*/
KMEANS,
}
/**
* Creates a new Nystrom approximation object
* @param k the kernel trick to form an approximation of
* @param dataset the data set to form the approximate feature space from
* @param basisSize the number of basis vectors to use, this is the output
* dimension size.
* @param method what sampling method should be used to select the basis
* vectors from the full data set.
*/
public Nystrom(KernelTrick k, DataSet dataset, int basisSize, SamplingMethod method)
{
this(k, dataset, basisSize, method, 0.0, false);
}
/**
* Creates a new Nystrom approximation object using the
* {@link RBFKernel RBF Kernel} with 500 basis vectors
*
*/
public Nystrom()
{
this(new RBFKernel(), 500);
}
/**
* Creates a new Nystrom approximation object
*
* @param k the kernel trick to form an approximation of
* @param basisSize the number of basis vectors to use, this is the output
* dimension size.
*/
public Nystrom(KernelTrick k, int basisSize)
{
this(k, basisSize, SamplingMethod.UNIFORM, 1e-5, false);
}
/**
* Creates a new Nystrom approximation object
* @param k the kernel trick to form an approximation of
* @param basisSize the number of basis vectors to use, this is the output
* dimension size.
* @param method what sampling method should be used to select the basis
* vectors from the full data set.
* @param ridge a non negative additive term to regularize the eigen values
* of the decomposition.
* @param sampleWithReplacment {@code true} if the basis vectors should be
* sampled with replacement, {@code false} if they should not.
*/
public Nystrom(KernelTrick k, int basisSize, SamplingMethod method, double ridge, boolean sampleWithReplacment )
{
setKernel(k);
setBasisSize(basisSize);
setBasisSamplingMethod(method);
setRidge(ridge);
this.sampleWithReplacment = sampleWithReplacment;
}
/**
* Creates a new Nystrom approximation object
* @param k the kernel trick to form an approximation of
* @param dataset the data set to form the approximate feature space from
* @param basisSize the number of basis vectors to use, this is the output
* dimension size.
* @param method what sampling method should be used to select the basis
* vectors from the full data set.
* @param ridge a non negative additive term to regularize the eigen values
* of the decomposition.
* @param sampleWithReplacment {@code true} if the basis vectors should be
* sampled with replacement, {@code false} if they should not.
*/
@SuppressWarnings("fallthrough")
public Nystrom(KernelTrick k, DataSet dataset, int basisSize, SamplingMethod method, double ridge, boolean sampleWithReplacment )
{
this(k, basisSize, method, ridge, sampleWithReplacment);
fit(dataset);
}
@Override
public void fit(DataSet dataset)
{
Random rand = RandomUtil.getRandom();
if(ridge < 0)
throw new IllegalArgumentException("ridge must be positive, not " + ridge);
final int N = dataset.size();
final int D = dataset.getNumNumericalVars();
final List<Vec> X = dataset.getDataVectors();
//Create smaller gram matrix K and decompose is
basisVecs = sampleBasisVectors(k, dataset, X, method, basisSize, sampleWithReplacment, rand);
accelCache = k.getAccelerationCache(basisVecs);
Matrix K = new DenseMatrix(basisSize, basisSize);
for (int i = 0; i < basisSize; i++)
{
K.set(i, i, k.eval(i, i, basisVecs, accelCache));
for (int j = i + 1; j < basisSize; j++)
{
double val = k.eval(i, j, basisVecs, accelCache);
K.set(i, j, val);
K.set(j, i, val);
}
}
//Decompose it
EigenValueDecomposition eig = new EigenValueDecomposition(K);
double[] eigenVals = eig.getRealEigenvalues();
DenseVector eigNorm = new DenseVector(eigenVals.length);
for (int i = 0; i < eigenVals.length; i++)
eigNorm.set(i, 1.0 / Math.sqrt(Math.max(1e-7, eigenVals[i]+ridge)));
//U * 1/sqrt(S)
Matrix U = eig.getV();
Matrix.diagMult(U, eigNorm);
transform = U.multiply(eig.getVRaw());
transform.mutableTranspose();
}
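    /*
     * A sketch of the intent of fit(), assuming the eigendecomposition
     * K = U diag(lambda) U^T of the basis gram matrix: the stored transform
     * acts (up to transposition) like (K + ridge*I)^(-1/2), so that
     * transform(dp) below effectively produces
     *
     *   phi(x) = (K + ridge*I)^(-1/2) * [k(x,b_1), ..., k(x,b_m)]^T
     *
     * the standard Nystrom feature map, whose inner products approximate the
     * kernel.
     */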
/**
* Copy constructor
* @param toCopy the object to copy
*/
protected Nystrom(Nystrom toCopy)
{
this.k = toCopy.k.clone();
this.method = toCopy.method;
this.sampleWithReplacment = toCopy.sampleWithReplacment;
this.dimension = toCopy.dimension;
this.ridge = toCopy.ridge;
this.basisSize = toCopy.basisSize;
if(toCopy.basisVecs != null)
{
this.basisVecs = new ArrayList<Vec>(toCopy.basisVecs.size());
for(Vec v : toCopy.basisVecs)
this.basisVecs.add(v.clone());
if(toCopy.accelCache != null)
this.accelCache = new DoubleList(toCopy.accelCache);
}
if(toCopy.transform != null)
this.transform = toCopy.transform.clone();
}
/**
* Performs sampling of a data set for a subset of the vectors that make a
* good set of basis vectors for forming an approximation of a full kernel
     * space. While these methods are motivated by Nystrom's algorithm, they
* are also useful for others.
*
* @param k the kernel trick to form the basis for
* @param dataset the data set to sample from
* @param X the list of vectors from the data set
* @param method the sampling method to use
* @param basisSize the number of basis vectors to select
     * @param sampleWithReplacment whether or not to sample with replacement
* @param rand the source of randomness for the sampling
* @return a list of basis vectors sampled from the data set.
* @see SamplingMethod
*/
public static List<Vec> sampleBasisVectors(KernelTrick k, DataSet dataset, final List<Vec> X, SamplingMethod method, int basisSize, boolean sampleWithReplacment, Random rand)
{
List<Vec> basisVecs = new ArrayList<Vec>(basisSize);
final int N = dataset.size();
switch (method)
{
case DIAGONAL:
double[] diags = new double[N];
diags[0] = k.eval(X.get(0), X.get(0));
for (int i = 1; i < N; i++)
diags[i] = diags[i-1] + k.eval(X.get(i), X.get(i));
sample(basisSize, rand, diags, X, sampleWithReplacment, basisVecs);
break;
case NORM:
double[] norms = new double[N];
List<Vec> gramVecs = new ArrayList<Vec>();
for (int i = 0; i < N; i++)
gramVecs.add(new DenseVector(N));
List<Double> tmpCache = k.getAccelerationCache(X);
for (int i = 0; i < N; i++)
{
gramVecs.get(i).set(i, k.eval(i, i, X, tmpCache));
for (int j = i + 1; j < N; j++)
{
double val = k.eval(i, j, X, tmpCache);
gramVecs.get(i).set(j, val);
gramVecs.get(j).set(i, val);
}
}
norms[0] = gramVecs.get(0).pNorm(2);
for (int i = 1; i < gramVecs.size(); i++)
norms[i] = norms[i - 1] + gramVecs.get(i).pNorm(2);
sample(basisSize, rand, norms, X, sampleWithReplacment, basisVecs);
break;
case KMEANS:
HamerlyKMeans kMeans = new HamerlyKMeans(new EuclideanDistance(), SeedSelectionMethods.SeedSelection.KPP);
kMeans.setStoreMeans(true);
kMeans.cluster(dataset, basisSize);
basisVecs.addAll(kMeans.getMeans());
break;
case UNIFORM:
default:
                if (!sampleWithReplacment)//the Set is needed to avoid selecting duplicate basis vectors
{
Set<Integer> sampled = new IntSet(basisSize);
while (sampled.size() < basisSize)
sampled.add(rand.nextInt(N));
for (int indx : sampled)
basisVecs.add(X.get(indx));
}
else
for (int i = 0; i < basisSize; i++)
basisVecs.add(X.get(rand.nextInt(N)));
}
return basisVecs;
}
/**
     * Performs weighted sampling on the cumulative sum of all values mapped to
* each vector. The sampled vectors will be placed directly into {@link #basisVecs}
* @param basisSize the number of basis vectors to sample for
* @param rand the source of randomness
* @param weightSume the cumulative weight sum
* @param X the list of vectors
     * @param sampleWithReplacment whether or not to sample with replacement
* @param basisVecs the list to store the vecs in
*/
private static void sample(int basisSize, Random rand, double[] weightSume, List<Vec> X, boolean sampleWithReplacment, List<Vec> basisVecs)
{
Set<Integer> sampled = new IntSet(basisSize);
double max = weightSume[weightSume.length-1];
for(int i = 0; i < basisSize; i++)
{
double rndVal = rand.nextDouble()*max;
int indx = Arrays.binarySearch(weightSume, rndVal);
if(indx < 0)
indx = (-(indx) - 1);
if(sampleWithReplacment)//no need to do anything
basisVecs.add(X.get(indx));
else
{
int size = sampled.size();
sampled.add(indx);
if(sampled.size() == size)
i--;//do it again
else
basisVecs.add(X.get(indx));
}
}
}
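    /*
     * Worked example of the cumulative-sum sampling above: for raw weights
     * {2, 5, 3} the cumulative array is {2, 7, 10} and max = 10. A draw of
     * rand.nextDouble()*max = 6.2 has a binarySearch insertion point of 1, so
     * the second vector is selected; each vector is chosen with probability
     * proportional to its weight (here 2/10, 5/10 and 3/10).
     */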
@Override
public DataPoint transform(DataPoint dp)
{
Vec x = dp.getNumericalValues();
List<Double> qi = k.getQueryInfo(x);
Vec kVec = new DenseVector(basisVecs.size());
for(int i = 0; i < basisVecs.size(); i++)
kVec.set(i, k.eval(i, x, qi, basisVecs, accelCache));
return new DataPoint(kVec.multiply(transform), dp.getCategoricalValues(), dp.getCategoricalData());
}
@Override
public Nystrom clone()
{
return new Nystrom(this);
}
/**
* Sets the regularization parameter to add to the eigen values of the gram
* matrix. This can be particularly useful when using a large (500+) number
* of components.
*
* @param ridge the non-negative value in [0, ∞) to add to each eigen
* value
*/
public void setRidge(double ridge)
{
if (ridge < 0 || Double.isNaN(ridge) || Double.isInfinite(ridge))
throw new IllegalArgumentException("Ridge must be non negative, not " + ridge);
this.ridge = ridge;
}
/**
* Returns the regularization value added to each eigen value
*
* @return the regularization value added to each eigen value
*/
public double getRidge()
{
return ridge;
}
/**
* Sets the dimension of the new feature space, which is the number of
* principal components to select from the kernelized feature space.
*
     * @param dimension the number of dimensions to project down to
*/
public void setDimension(int dimension)
{
if (dimension < 1)
throw new IllegalArgumentException("The number of dimensions must be positive, not " + dimension);
this.dimension = dimension;
}
/**
     * Returns the number of dimensions to project down to
     *
     * @return the number of dimensions to project down to
*/
public int getDimension()
{
return dimension;
}
/**
* Sets the method of selecting the basis vectors
*
* @param method the method of selecting the basis vectors
*/
public void setBasisSamplingMethod(SamplingMethod method)
{
this.method = method;
}
/**
* Returns the method of selecting the basis vectors
*
* @return the method of selecting the basis vectors
*/
public SamplingMethod getBasisSamplingMethod()
{
return method;
}
/**
     * Sets the basis size used to learn the kernel approximation. Increasing
     * the basis size increases the accuracy of the transform, but increases
     * the training time at a cubic rate.
*
* @param basisSize the number of basis vectors to build Kernel PCA from
*/
public void setBasisSize(int basisSize)
{
if (basisSize < 1)
throw new IllegalArgumentException("The basis size must be positive, not " + basisSize);
this.basisSize = basisSize;
}
/**
* Returns the number of basis vectors to use
*
* @return the number of basis vectors to use
*/
public int getBasisSize()
{
return basisSize;
}
/**
*
* @param k the kernel trick to use
*/
public void setKernel(KernelTrick k)
{
this.k = k;
}
/**
*
* @return the kernel trick to use
*/
public KernelTrick getKernel()
{
return k;
}
}
| 17,154 | 35.039916 | 178 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/kernel/RFF_RBF.java | package jsat.datatransform.kernel;
import java.util.Random;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.datatransform.DataTransformBase;
import jsat.distributions.Distribution;
import jsat.distributions.kernels.RBFKernel;
import jsat.linear.DenseVector;
import jsat.linear.Matrix;
import jsat.linear.RandomMatrix;
import jsat.linear.RandomVector;
import jsat.linear.Vec;
import jsat.utils.random.RandomUtil;
import jsat.utils.random.XORWOW;
/**
* An Implementation of Random Fourier Features for the {@link RBFKernel}. It
* transforms the numerical variables of a feature space to form a new feature
* space where the dot product between features approximates the RBF Kernel
* product.
* <br><br>
* See: Rahimi, A.,&Recht, B. (2007). <i>Random Features for Large-Scale
* Kernel Machines</i>. Neural Information Processing Systems. Retrieved from
* <a href="http://seattle.intel-research.net/pubs/rahimi-recht-random-features.pdf">
* here</a>
* @author Edward Raff
*/
public class RFF_RBF extends DataTransformBase
{
private static final long serialVersionUID = -3478216020648280477L;
private Matrix transform;
private Vec offsets;
private double sigma;
private int dim;
private boolean inMemory;
/**
     * Creates a new RFF RBF object that will use a transformed feature space
* with a dimensionality of 512. This constructor should be used with a
* parameter search to find a good value for {@link #setSigma(double) sigma}
*/
public RFF_RBF()
{
this(1.0);
}
/**
     * Creates a new RFF RBF object that will use a transformed feature space
* with a dimensionality of 512.
*
* @param sigma the positive sigma value for the {@link RBFKernel}
*/
public RFF_RBF(double sigma)
{
this(sigma, 512);
}
/**
* Creates a new RFF RBF object
*
* @param sigma the positive sigma value for the {@link RBFKernel}
* @param dim the new feature size dimension to project into.
*/
public RFF_RBF(double sigma, int dim)
{
this(sigma, dim, true);
}
/**
* Creates a new RFF RBF object
*
* @param sigma the positive sigma value for the {@link RBFKernel}
* @param dim the new feature size dimension to project into.
* @param inMemory {@code true} if the internal matrix should be stored in
     * memory. If {@code false}, the matrix will be re-computed as needed,
     * increasing computation cost but using no extra memory.
*/
public RFF_RBF(double sigma, int dim, boolean inMemory)
{
setSigma(sigma);
setDimensions(dim);
setInMemory(inMemory);
}
/**
* Creates a new RFF RBF object
* @param featurSize the number of numeric features in the original feature
* space
* @param sigma the positive sigma value for the {@link RBFKernel}
* @param dim the new feature size dimension to project into.
* @param rand the source of randomness to initialize internal state
* @param inMemory {@code true} if the internal matrix should be stored in
     * memory. If {@code false}, the matrix will be re-computed as needed,
     * increasing computation cost but using no extra memory.
*/
public RFF_RBF(int featurSize, double sigma, int dim, Random rand, boolean inMemory)
{
this(sigma, dim, inMemory);
if(featurSize <= 0)
throw new IllegalArgumentException("The number of numeric features must be positive, not " + featurSize);
if(sigma <= 0 || Double.isInfinite(sigma) || Double.isNaN(sigma))
throw new IllegalArgumentException("The sigma parameter must be positive, not " + sigma);
if(dim <= 1)
throw new IllegalArgumentException("The target dimension must be positive, not " + dim);
transform = new RandomMatrixRFF_RBF(Math.sqrt(0.5/(sigma*sigma)), featurSize, dim, rand.nextLong());
offsets = new RandomVectorRFF_RBF(dim, rand.nextLong());
if(inMemory)
{
transform = transform.add(0.0);//will copy into a new mutable and add nothing
offsets = new DenseVector(offsets);
}
}
@Override
public void fit(DataSet data)
{
int featurSize = data.getNumNumericalVars();
Random rand = RandomUtil.getRandom();
transform = new RandomMatrixRFF_RBF(Math.sqrt(0.5/(sigma*sigma)), featurSize, dim, rand.nextLong());
offsets = new RandomVectorRFF_RBF(dim, rand.nextLong());
if(inMemory)
{
transform = transform.add(0.0);//will copy into a new mutable and add nothing
offsets = new DenseVector(offsets);
}
}
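    /*
     * A minimal usage sketch, assuming a hypothetical dataset named "data"
     * that contains only numeric features:
     *
     *   RFF_RBF rff = new RFF_RBF(2.0, 512, true);
     *   rff.fit(data);
     *   DataPoint z0 = rff.transform(data.getDataPoint(0));
     *   DataPoint z1 = rff.transform(data.getDataPoint(1));
     *
     * The dot product of the transformed numeric vectors approximates the RBF
     * kernel value between the original points, with accuracy improving as the
     * number of dimensions grows.
     */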
/**
* Copy constructor
* @param toCopy the object to copy
*/
protected RFF_RBF(RFF_RBF toCopy)
{
if(toCopy.transform != null)
this.transform = toCopy.transform.clone();
if(toCopy.offsets != null)
this.offsets = toCopy.offsets.clone();
this.dim = toCopy.dim;
this.inMemory = toCopy.inMemory;
this.sigma = toCopy.sigma;
}
@Override
public DataPoint transform(DataPoint dp)
{
Vec oldX = dp.getNumericalValues();
Vec newX = oldX.multiply(transform);
final double coef = Math.sqrt(2.0/transform.cols());
for(int i = 0; i < newX.length(); i++)
newX.set(i, Math.cos(newX.get(i)+offsets.get(i))*coef);
return new DataPoint(newX, dp.getCategoricalValues(), dp.getCategoricalData());
}
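    /*
     * A sketch of what transform() computes, in the notation of Rahimi &
     * Recht: each output coordinate is
     *
     *   z_i(x) = sqrt(2/D) * cos(w_i . x + b_i)
     *
     * where D is the output dimension, the w_i are the columns of the random
     * Gaussian matrix (whose scale is derived from sigma) and the b_i are
     * uniform offsets on [0, 2*pi). The expectation of z(x) . z(y) then
     * approximates the RBF kernel value for the chosen bandwidth.
     */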
@Override
public RFF_RBF clone()
{
return new RFF_RBF(this);
}
private static class RandomMatrixRFF_RBF extends RandomMatrix
{
private static final long serialVersionUID = 4702514384718636893L;
private double coef;
public RandomMatrixRFF_RBF(double coef, int rows, int cols, long seedMult)
{
super(rows, cols, seedMult);
this.coef = coef;
}
@Override
protected double getVal(Random rand)
{
return coef*rand.nextGaussian();
}
}
private static class RandomVectorRFF_RBF extends RandomVector
{
private static final long serialVersionUID = -6132378281909907937L;
public RandomVectorRFF_RBF(int length, long seedMult)
{
super(length, seedMult);
}
@Override
protected double getVal(Random rand)
{
return rand.nextDouble()*2*Math.PI;
}
@Override
public Vec clone()
{
return this;
}
}
/**
     * Sets whether or not the transform matrix is stored explicitly in memory.
     * Explicit storage is often faster, but can be prohibitive for
* large feature sizes
* @param inMemory {@code true} to explicitly store the transform matrix,
* {@code false} to re-create it on the fly as needed
*/
public void setInMemory(boolean inMemory)
{
this.inMemory = inMemory;
}
/**
*
* @return {@code true} if this object will explicitly store the transform
* matrix, {@code false} to re-create it on the fly as needed
*/
public boolean isInMemory()
{
return inMemory;
}
/**
* Sets the number of dimensions in the new approximate space to use. This
* will be the number of numeric features in the transformed data, and
* larger values increase the accuracy of the approximation.
*
     * @param dimensions the number of dimensions to use for the transformed feature space
*/
public void setDimensions(int dimensions)
{
if (dimensions < 1)
throw new ArithmeticException("Number of dimensions must be a positive value, not " + dimensions);
this.dim = dimensions;
}
/**
* Returns the number of dimensions that will be used in the projected space
*
* @return the number of dimensions that will be used in the projected space
*/
public int getDimensions()
{
return dim;
}
/**
* Sets the σ parameter of the RBF kernel that is being approximated.
*
* @param sigma the positive value to use for σ
* @see RBFKernel#setSigma(double)
*/
public void setSigma(double sigma)
{
if (sigma <= 0.0 || Double.isInfinite(sigma) || Double.isNaN(sigma))
throw new IllegalArgumentException("Sigma must be a positive value, not " + sigma);
this.sigma = sigma;
}
/**
* Returns the σ value used for the RBF kernel approximation.
*
* @return the σ value used for the RBF kernel approximation.
*/
public double getSigma()
{
return sigma;
}
/**
* Guess the distribution to use for the kernel width term
* {@link #setSigma(double) σ} in the RBF kernel being approximated.
*
* @param d the data set to get the guess for
* @return the guess for the σ parameter in the RBF Kernel
*/
public Distribution guessSigma(DataSet d)
{
return RBFKernel.guessSigma(d);
}
}
| 9,233 | 30.515358 | 117 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/visualization/Isomap.java | /*
* Copyright (C) 2015 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.datatransform.visualization;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.linear.*;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.vectorcollection.DefaultVectorCollection;
import jsat.linear.vectorcollection.VectorCollection;
import jsat.utils.FakeExecutor;
import jsat.utils.FibHeap;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.ParallelUtils;
/**
* Isomap is an extension of {@link MDS}. It uses a geodesic distance made from
* a nearest neighbor search of all the points in the data set. This
* implementation also includes the extension
* {@link #setCIsomap(boolean) C-Isomap}, which further weights distances by
* density.<br>
* <br>
 * Note that Isomap will normally fail on some datasets when two or more
 * regions cannot be connected in the induced neighbor graph. While increasing
 * the number of neighbors considered will eventually resolve this problem,
 * keeping such groups separated may be desirable in practice. This
 * implementation includes a non-standard addition that will forcibly connect
 * such isolated regions with very large distances, hoping to preserve the
 * farther distances in the given dataset while maintaining local structure.<br>
* <br>
*
* See:<br>
* <ul>
* <li>Tenenbaum, J. B., Silva, V. De, & Langford, J. C. (2000). <i>A Global
* Geometric Framework for Nonlinear Dimensionality Reduction</i>. Science, 290,
* 2319–2323. doi:10.1126/science.290.5500.2319</li>
* <li>De Silva, V., & Tenenbaum, J. B. (2003). <i>Global Versus Local Methods
* in Nonlinear Dimensionality Reduction</i>. In Advances in Neural Information
* Processing Systems 15 (pp. 705–712). MIT Press. Retrieved from
* <a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.9.3407&rep=rep1&type=pdf">here</a></li>
* </ul>
*
* @author Edward Raff <[email protected]>
*/
public class Isomap implements VisualizationTransform
{
private DistanceMetric dm = new EuclideanDistance();
private VectorCollection<VecPaired<Vec, Integer>> vc = new DefaultVectorCollection<>();
private int searchNeighbors = 15;
private MDS mds = new MDS();
private boolean c_isomap = false;
/**
     * Creates a new Isomap transform using 15 nearest neighbors
*/
public Isomap()
{
this(15, false);
}
/**
*
* @param searchNeighbors the number of nearest neighbors to consider
*/
public Isomap(int searchNeighbors)
{
this(searchNeighbors, false);
}
/**
*
* @param searchNeighbors the number of nearest neighbors to consider
* @param c_isomap {@code true} to use the C-Isomap extension, {@code false}
* for normal Isomap.
*/
public Isomap(int searchNeighbors, boolean c_isomap)
{
setNeighbors(searchNeighbors);
setCIsomap(c_isomap);
}
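    /*
     * A minimal usage sketch, assuming a hypothetical SimpleDataSet named
     * "data" with only numeric features:
     *
     *   Isomap iso = new Isomap(15, false);
     *   iso.setTargetDimension(2);
     *   SimpleDataSet embedded = iso.transform(data, true);
     *
     * The returned dataset contains the same points, with their numeric
     * features replaced by the embedding found by running MDS on the graph
     * (geodesic) distances.
     */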
/**
* Set the number of neighbors to consider for the initial graph in Isomap
* @param searchNeighbors the number of nearest neighbors to consider
*/
public void setNeighbors(int searchNeighbors)
{
if(searchNeighbors < 2)
throw new IllegalArgumentException("number of neighbors considered must be at least 2, not " + searchNeighbors);
this.searchNeighbors = searchNeighbors;
}
/**
*
* @return the number of neighbors used when creating the initial graph
*/
public int getNeighbors()
{
return searchNeighbors;
}
/**
* Controls whether the C-Isomap extension is used. If set true, the initial
* distances will also be scaled based on the density of the region between
* the points. If false, normal Isomap will be used.
*
* @param c_isomap {@code true} to use the C-Isomap extension, {@code false}
* for normal Isomap.
*/
public void setCIsomap(boolean c_isomap)
{
this.c_isomap = c_isomap;
}
/**
*
* @return {@code true} if the C-Isomap extension is in use, {@code false}
* for normal Isomap.
*/
public boolean isCIsomap()
{
return c_isomap;
}
@Override
public <Type extends DataSet> Type transform(DataSet<Type> d, boolean parallel)
{
final int N = d.size();
final Matrix delta = new DenseMatrix(N, N);
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
if (i == j)
delta.set(i, j, 0);
else
delta.set(i, j, Double.MAX_VALUE);
final List<VecPaired<Vec, Integer>> vecs = new ArrayList<>(N);
for(int i = 0; i < N; i++)
vecs.add(new VecPaired<>(d.getDataPoint(i).getNumericalValues(), i));
vc.build(parallel, vecs, dm);
final List<Double> cache = dm.getAccelerationCache(vecs, parallel);
final int knn = searchNeighbors+1;//+1 b/c we are closest to ourselves
//bleh, ugly generics...
final List<List<? extends VecPaired<VecPaired<Vec, Integer>, Double>>> neighborGraph = new ArrayList<>();
for (int i = 0; i < N; i++)
neighborGraph.add(null);
final double[] avgNeighborDist = new double[N];
//do knn search and store results so we can do distances
ParallelUtils.run(parallel, N, (i)->
{
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors = vc.search(vecs.get(i).getVector(), knn);
neighborGraph.set(i, neighbors);
//Compute stats that may be used for c-isomap version
for (int z = 1; z < neighbors.size(); z++)
{
VecPaired<VecPaired<Vec, Integer>, Double> neighbor = neighbors.get(z);
double dist = neighbor.getPair();
avgNeighborDist[i] += dist;
}
avgNeighborDist[i] /= (neighbors.size()-1);
});
if(c_isomap)
{
int i = 0;
for(List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors : neighborGraph)
{
for(VecPaired<VecPaired<Vec, Integer>, Double> neighbor : neighbors)
neighbor.setPair(neighbor.getPair()/Math.sqrt(avgNeighborDist[neighbor.getVector().getPair()]+avgNeighborDist[i]+1e-6));
i++;
}
}
ParallelUtils.run(parallel, N, (k)->
{
double[] tmp_dist = dijkstra(neighborGraph, k);
for (int i = 0; i < N; i++)
{
tmp_dist[i] = Math.min(tmp_dist[i], delta.get(k, i));
delta.set(i, k, tmp_dist[i]);
delta.set(k, i, tmp_dist[i]);
}
});
//lets check for any disjoint groupings, replace them with something reasonable
//we will use the largest obtainable distance to be an offset for our infinity distances
double largest_natural_dist_tmp = 0;
for (int i = 0; i < N; i++)
for (int j = i + 1; j < N; j++)
if(delta.get(i, j) < Double.MAX_VALUE)
largest_natural_dist_tmp = Math.max(largest_natural_dist_tmp, delta.get(i, j));
final double largest_natural_dist = largest_natural_dist_tmp;
ParallelUtils.run(parallel, N, (i)->
{
for (int j = i + 1; j < N; j++)
{
double d_ij = delta.get(i, j);
if (d_ij >= Double.MAX_VALUE)//replace with the normal distance + 1 order of magnitude?
{
d_ij = 10*dm.dist(i, j, vecs, cache)+1.5*largest_natural_dist;
delta.set(i, j, d_ij);
delta.set(j, i, d_ij);
}
}
});
SimpleDataSet emedded = mds.transform(delta, parallel);
DataSet<Type> transformed = d.shallowClone();
transformed.replaceNumericFeatures(emedded.getDataVectors());
return (Type) transformed;
}
private double[] dijkstra(List<List<? extends VecPaired<VecPaired<Vec, Integer>, Double>>> neighborGraph, int sourceIndex)
{
//TODO generalize and move this out into some other class as a static method
final int N = neighborGraph.size();
double[] dist = new double[N];
Arrays.fill(dist, Double.POSITIVE_INFINITY);
dist[sourceIndex] = 0;
List<FibHeap.FibNode<Integer>> nodes = new ArrayList<>(N);
FibHeap<Integer> Q = new FibHeap<>();
for (int i = 0; i < N; i++)
nodes.add(null);
nodes.set(sourceIndex, Q.insert(sourceIndex, dist[sourceIndex]));
while (Q.size() > 0)
{
FibHeap.FibNode<Integer> u = Q.removeMin();
int u_indx = u.getValue();
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> neighbors = neighborGraph.get(u_indx);
for (int z = 1; z < neighbors.size(); z++)
{
VecPaired<VecPaired<Vec, Integer>, Double> neighbor = neighbors.get(z);
int j = neighbor.getVector().getPair();
double u_j_dist = neighbor.getPair();
double alt = dist[u_indx] + u_j_dist;
if (alt < dist[j])
{
dist[j] = alt;
//prev[j] ← u
if(nodes.get(j) == null)
nodes.set(j, Q.insert(j, alt));
else
Q.decreaseKey(nodes.get(j), alt);
}
}
}
return dist;
}
@Override
public int getTargetDimension()
{
return mds.getTargetDimension();
}
@Override
public boolean setTargetDimension(int target)
{
return mds.setTargetDimension(target);
}
}
| 10,807 | 35.513514 | 140 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/visualization/LargeViz.java | /*
* Copyright (C) 2016 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.datatransform.visualization;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.datatransform.DataTransform;
import jsat.distributions.Uniform;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* LargeViz is an algorithm for creating low dimensional embeddings for
* visualization. It is meant to be faster and better quality than
* {@link TSNE t-SNE} without requiring any parameter tuning to get good
* results. LargeViz is related to t-SNE in how the neighbor graph is
* constructed, and the {@link #setPerplexity(double) perplexity} parameter for
* LargeViz has the same meaning and impact as the perplexity parameter in
* t-SNE.<br>
* <br>
 * NOTE: The original LargeViz paper includes a faster scheme for approximately
* constructing the nearest neighbor graph. This is not yet implemented, but has
* no negative impact on the quality of the result.
* <br>
* See: Tang, J., Liu, J., Zhang, M., & Mei, Q. (2016). Visualizing Large-scale
* and High-dimensional Data. In Proceedings of the 25th International
* Conference on World Wide Web (pp. 287–297). Republic and Canton of Geneva,
* Switzerland: International World Wide Web Conferences Steering Committee.
* doi:10.1145/2872427.2883041
* @author Edward Raff
*/
public class LargeViz implements VisualizationTransform
{
private DistanceMetric dm_source = new EuclideanDistance();
private DistanceMetric dm_embed = new EuclideanDistance();
private double perplexity = 50;
private int dt = 2;
/**
* This is the number of negative samples to take for each vertex <br>
* "number of negative samples is set as 5"
*/
private int M = 5;
/**
* "γ is set as 7 by default"
*/
private double gamma = 7;
/**
* Sets the target perplexity of the gaussian used over each data point. The
* perplexity can be thought of as a quasi desired number of nearest
* neighbors to be considered, but is adapted based on the distribution of
* the data. Increasing the perplexity can increase the amount of time it
* takes to get an embedding. Using a value in the range of [5, 100] is
* recommended.
*
* @param perplexity the quasi number of neighbors to consider for each data point
*/
public void setPerplexity(double perplexity)
{
if(perplexity <= 0 || Double.isNaN(perplexity) || Double.isInfinite(perplexity))
throw new IllegalArgumentException("perplexity must be positive, not " + perplexity);
this.perplexity = perplexity;
}
/**
*
* @return the target perplexity to use for each data point
*/
public double getPerplexity()
{
return perplexity;
}
/**
* Sets the distance metric to use for the original space. This will
* determine the target nearest neighbors to keep close to each other in
* the embedding space
*
* @param dm the distance metric to use
*/
public void setDistanceMetricSource(DistanceMetric dm)
{
this.dm_source = dm;
}
/**
* Sets the distance metric to use for the embedded space. This will
* determine the actual nearest neighbors as the occur in the embedded space.
*
* @param dm the distance metric to use
*/
public void setDistanceMetricEmbedding(DistanceMetric dm)
{
this.dm_embed = dm;
}
/**
* Sets the number of negative neighbor samples to obtain for each data
* point. The default recommended value is 5.
*
* @param M the number of negative samples to use for each update
*/
public void setNegativeSamples(int M)
{
if(M < 1)
throw new IllegalArgumentException("Number of negative samples must be positive, not " + M);
this.M = M;
}
/**
*
* @return the number of negative samples to use for each update
*/
public int getNegativeSamples()
{
return M;
}
/**
* Gamma controls the negative weight assigned to negative edges in the
* optimization problem. Large values will place a higher emphasis on
* separating non-neighbors in the embedded space. The default recommend
* value is 7.
*
* @param gamma the weight for negative edge samples
*/
public void setGamma(double gamma)
{
if(Double.isInfinite(gamma) || Double.isNaN(gamma) || gamma <= 0)
throw new IllegalArgumentException("Gamma must be positive, not " + gamma);
this.gamma = gamma;
}
/**
*
* @return the weight for negative edge samples
*/
public double getGamma()
{
return gamma;
}
@Override
public int getTargetDimension()
{
return dt;
}
@Override
public boolean setTargetDimension(int target)
{
if(target < 2)
return false;
dt = target;
return true;
}
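    /*
     * A minimal usage sketch, assuming a hypothetical SimpleDataSet named
     * "data" with only numeric features:
     *
     *   LargeViz lv = new LargeViz();
     *   lv.setPerplexity(50);
     *   lv.setTargetDimension(2);
     *   SimpleDataSet embedded = lv.transform(data, true);
     *
     * As with t-SNE, the perplexity acts as a soft number of neighbors, and
     * the returned points carry the 2-d embedding in their numeric features.
     */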
@Override
public <Type extends DataSet> Type transform(DataSet<Type> d, boolean parallel)
{
Random rand = RandomUtil.getRandom();
final ThreadLocal<Random> local_rand = ThreadLocal.withInitial(RandomUtil::getRandom);
final int N = d.size();
//If perp set too big, the search size would be larger than the dataset size. So min to N
/**
* form sec 4.1: "we compute the sparse approximation by finding the
* floor(3u) nearest neighbors of each of the N input objects (recall
* that u is the perplexity of the conditional distributions)"
*/
final int knn = (int) Math.min(Math.floor(3*perplexity), N-1);
/**
* P_ij does not change at this point, so lets compute these values only
* once please! j index matches up to the value stored in nearMe.
         * Would be W_ij in the notation of the LargeViz paper, but P_ij from the TSNE paper
*/
final double[][] nearMePij = new double[N][knn];
/**
* Each row is the set of 3*u indices returned by the NN search
*/
final int[][] nearMe = new int[N][knn];
TSNE.computeP(d, parallel, rand, knn, nearMe, nearMePij, dm_source, perplexity);
final double[][] nearMeSample = new double[N][knn];
/**
* Array of the sample weights used to perform the negative sampling.
*
* Initial value is out-degree defined in LINE paper, section 4.1.2.
*/
final double[] negSampleWeight = new double[N];
double negSum = 0;
for(int i = 0; i < N; i++)
{
double sum = DenseVector.toDenseVec(nearMePij[i]).sum();
sum += nearMePij[i].length*Double.MIN_VALUE;
negSampleWeight[i] = sum;
nearMeSample[i][0] = nearMePij[i][0];
for(int j = 1; j < knn; j++)//make cumulative
nearMeSample[i][j] = Math.ulp(nearMePij[i][j]) + nearMePij[i][j] + nearMeSample[i][j-1];
for(int j = 1; j < knn; j++)//normalize
nearMeSample[i][j] /= sum;
negSampleWeight[i] = Math.pow(negSampleWeight[i], 0.75);
negSum += negSampleWeight[i];
if(i > 0)
negSampleWeight[i] += negSampleWeight[i-1];
}
//normalize to [0, 1] range
for(int i = 0; i < N; i++)
negSampleWeight[i]/= negSum;
final List<Vec> embeded = new ArrayList<>();
Uniform initDistribution = new Uniform(-0.00005/dt, 0.00005/dt);
for(int i = 0; i < N; i++)
embeded.add(initDistribution.sampleVec(dt, rand));
/**
         * Number of threads to use. The paper suggests asynchronous updates and just
         * ignoring unsafe alterations b/c the diff should be minor. Adding some extra
* logic so that we have at least a good handful of points per thread to
* avoid excessive edits on small datasets.
*/
final int threads_to_use = Math.max(Math.min(N/(200*M), SystemInfo.LogicalCores), 1);
final CountDownLatch latch = new CountDownLatch(threads_to_use);
/*
* Objective is
* w*(log(1/(1+g(x)^2)) + y log(1−1/(1+g(x)^2 )))
         * where g(x) is the euclidean distance and G(x) is g(x)^2
* d/x of ||x-y|| = (x-y)/||x-y||
* d/y of ||x-y|| = -(x-y)/||x-y||
* left hand side derivative of log(1/(1+g(x))) =
* = -(2 g(x) g'(x))/(g(x)^2+1)
* = -(2 ||x-y|| (x-y)/||x-y||)/(||x-y||^2+1)
* = -(2 (x-y))/(||x-y||^2+1)
* for d/y
* = -(2 (y-x))/(||x-y||^2+1)
*
* Right hand side portion
* derivative of z* log(1-1/(1+g(x)^2))
* = (2 z g'(x))/(g(x) (g(x)^2+1))
* = (2 z (x-y))/(||x-y||^2 (||x-y||^2+1))
* or for d/y
* = (2 z (y-x))/(||x-y||^2 (||x-y||^2+1))
         * NOTE: My derivative doesn't work. But adding
* an extra multiplication by ||x-y|| seems to fix everything? Want to
* come back and figure this out better.
*/
final double eta_0 = 1.0;
final long iterations = 1000L*N;
final ThreadLocal<Vec> local_grad_i = ThreadLocal.withInitial(()->new DenseVector(dt));
final ThreadLocal<Vec> local_grad_j = ThreadLocal.withInitial(()->new DenseVector(dt));
final ThreadLocal<Vec> local_grad_k = ThreadLocal.withInitial(()->new DenseVector(dt));
AtomicLong curIteration = new AtomicLong();
ParallelUtils.run(parallel, N, (start, end)->
{
Random l_rand = local_rand.get();
            //b/c indices are selected at random everyone can use the same iterator order
//more important is to make sure the range length is the same so that
//eta has the same range and effect in aggregate
            //To avoid issues with large datasets, we want 1000 * N iterations
//so do an iteration of our N/P 1000 times to get the correct amount
for(int moreTimes = 0; moreTimes < 1000; moreTimes++)
for(int iter = start; iter < end; iter++ )
{
double eta = eta_0*(1-curIteration.getAndIncrement()/(double)iterations);
eta = Math.max(eta, 0.0001);
int i = l_rand.nextInt(N);
//sample neighbor weighted by distance
int j = Arrays.binarySearch(nearMeSample[i], l_rand.nextDouble());
if (j < 0)
j = -(j) - 1;
                if(j >= knn)//oops. Can be hard to sample / can happen with lots of nearby near-0 dists
{
//lets fall back to picking someone at random
j = l_rand.nextInt(knn);
}
j = nearMe[i][j];
Vec y_i = embeded.get(i);
Vec y_j = embeded.get(j);
                //right hand side update for the positive sample
final double dist_ij = dm_embed.dist(i, j, embeded, null);
final double dist_ij_sqrd = dist_ij*dist_ij;
if(dist_ij <= 0 )
continue;//how did that happen?
Vec grad_i = local_grad_i.get();
Vec grad_j = local_grad_j.get();
Vec grad_k = local_grad_k.get();
y_i.copyTo(grad_j);
grad_j.mutableSubtract(y_j);
grad_j.mutableMultiply(-2*dist_ij/(dist_ij_sqrd+1));
grad_j.copyTo(grad_i);
//negative sampling time
for(int k = 0; k < M; k++)
{
int jk = -1;
do
{
jk = Arrays.binarySearch(negSampleWeight, l_rand.nextDouble());
if (jk < 0)
jk = -(jk) - 1;
if(jk == i || jk == j)
jk = -1;
//code to reject neighbors for sampling if too close
//Not sure if this code helps or hurts... not mentioned in paper
for(int search = 0; search < nearMe[i].length; search++)
if(nearMe[i][search] == jk && nearMeSample[i][search] < 0.98)
{
jk = -1;//too close to me!
break;
}
}
while(jk < 0);
//(2 z (y-x))/(||x-y||^2 (||x-y||^2+1))
Vec y_k = embeded.get(jk);
final double dist_ik = dm_embed.dist(i, jk, embeded, null);//dist(y_i, y_k);
final double dist_ik_sqrd = dist_ik*dist_ik;
if (dist_ik < 1e-12)
continue;
y_i.copyTo(grad_k);
grad_k.mutableSubtract(y_k);
grad_k.mutableMultiply(2*gamma/(dist_ik*(dist_ik_sqrd+1)));
grad_i.mutableAdd(grad_k);
y_k.mutableSubtract(eta, grad_k);
}
y_i.mutableAdd( eta, grad_i);
y_j.mutableAdd(-eta, grad_j);
}
});
DataSet<Type> toRet = d.shallowClone();
final IdentityHashMap<DataPoint, Integer> indexMap = new IdentityHashMap<>(N);
for(int i = 0; i < N; i++)
indexMap.put(d.getDataPoint(i), i);
toRet.applyTransform(new DataTransform()
{
@Override
public DataPoint transform(DataPoint dp)
{
int i = indexMap.get(dp);
return new DataPoint(embeded.get(i), dp.getCategoricalValues(), dp.getCategoricalData());
}
@Override
public void fit(DataSet data)
{
}
@Override
public DataTransform clone()
{
return this;
}
});
return (Type) toRet;
}
} | 15,513 | 36.47343 | 105 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/visualization/MDS.java | /*
* Copyright (C) 2015 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.datatransform.visualization;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseMatrix;
import jsat.linear.Matrix;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.math.OnLineStatistics;
import jsat.utils.FakeExecutor;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.AtomicDouble;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
import jsat.utils.random.XORWOW;
/**
* Multidimensional scaling is an algorithm for finding low dimensional
* embeddings of arbitrary distance matrices. MDS will attempt to find an
* embedding that maintains the same pair-wise distances between all items in
* the distance matrix. MDS is a non-convex problem, so different runs can
* produce different results. <br>
* <br>
 * MDS can be used on arbitrary dissimilarity matrices by calling {@link #transform(jsat.linear.Matrix, boolean)
 * }.
*
* @author Edward Raff <[email protected]>
*/
public class MDS implements VisualizationTransform
{
private static DistanceMetric embedMetric = new EuclideanDistance();
private DistanceMetric dm = new EuclideanDistance();
private double tolerance = 1e-3;
private int maxIterations = 300;
private int targetSize = 2;
/**
* Sets the tolerance parameter for determining convergence.
* @param tolerance the tolerance for declaring convergence
*/
public void setTolerance(double tolerance)
{
if(tolerance < 0 || Double.isInfinite(tolerance) || Double.isNaN(tolerance))
throw new IllegalArgumentException("tolerance must be a non-negative value, not " + tolerance);
this.tolerance = tolerance;
}
/**
*
* @return the tolerance parameter
*/
public double getTolerance()
{
return tolerance;
}
/**
* Sets the distance metric to use when creating the initial dissimilarity
* matrix of a new dataset. By default the {@link EuclideanDistance Euclidean
     * } distance is used, but any distance may be substituted. The chosen
* distance need not be a valid metric, its only requirement is symmetry.
*
* @param embedMetric the distance metric to use when creating the
* dissimilarity matrix.
*/
public void setEmbeddingMetric(DistanceMetric embedMetric)
{
this.embedMetric = embedMetric;
}
/**
*
* @return the distance metric used when creating a dissimilarity matrix
*/
public DistanceMetric getEmbeddingMetric()
{
return embedMetric;
}
@Override
public <Type extends DataSet> Type transform(final DataSet<Type> d, boolean parallel)
{
final List<Vec> orig_vecs = d.getDataVectors();
final List<Double> orig_distCache = dm.getAccelerationCache(orig_vecs, parallel);
final int N = orig_vecs.size();
        //Delta is the true dissimilarity matrix
final Matrix delta = new DenseMatrix(N, N);
OnLineStatistics avg = ParallelUtils.run(parallel, N, (i)->
{
OnLineStatistics local_avg = new OnLineStatistics();
for(int j = i+1; j < d.size(); j++)
{
double dist = dm.dist(i, j, orig_vecs, orig_distCache);
local_avg.add(dist);
delta.set(i, j, dist);
delta.set(j, i, dist);
}
return local_avg;
}, (a,b)->OnLineStatistics.add(a, b));
SimpleDataSet embeded = transform(delta, parallel);
//place the solution in a dataset of the correct type
DataSet<Type> transformed = d.shallowClone();
transformed.replaceNumericFeatures(embeded.getDataVectors());
return (Type) transformed;
}
public SimpleDataSet transform(Matrix delta)
{
return transform(delta, false);
}
public SimpleDataSet transform(final Matrix delta, boolean parallel)
{
final int N = delta.rows();
Random rand = RandomUtil.getRandom();
final Matrix X = new DenseMatrix(N, targetSize);
final List<Vec> X_views = new ArrayList<>();
for(int i = 0; i < N; i++)
{
for(int j = 0; j < targetSize; j++)
X.set(i, j, rand.nextDouble());
X_views.add(X.getRowView(i));
}
final List<Double> X_rowCache = embedMetric.getAccelerationCache(X_views, parallel);
//TODO, special case solution when all weights are the same, want to add general case as well
Matrix V_inv = new DenseMatrix(N, N);
for(int i = 0; i < N; i++)
for(int j = 0; j < N; j++)
{
if(i == j)
V_inv.set(i, j, (1.0-1.0/N)/N);
else
V_inv.set(i, j, (0.0-1.0/N)/N);
}
double stressChange = Double.POSITIVE_INFINITY;
double oldStress = stress(X_views, X_rowCache, delta, parallel);
        //the Guttman transform matrix
final Matrix B = new DenseMatrix(N, N);
final Matrix X_new = new DenseMatrix(X.rows(), X.cols());
for(int iter = 0; iter < maxIterations && stressChange > tolerance; iter++ )
{
ParallelUtils.run(parallel, B.rows(), (i)->
{
for (int j = i + 1; j < B.rows(); j++)
{
double d_ij = embedMetric.dist(i, j, X_views, X_rowCache);
if(d_ij > 1e-5)//avoid creating silly huge values
{
double b_ij = -delta.get(i, j)/d_ij;//-w_ij if we support weights in the future
B.set(i, j, b_ij);
B.set(j, i, b_ij);
}
else
{
B.set(i, j, 0);
B.set(j, i, 0);
}
}
});
X_new.zeroOut();
//set the diagonal values
for(int i = 0; i < B.rows(); i++)
{
B.set(i, i, 0);
for (int k = 0; k < B.cols(); k++)
if (k != i)
B.increment(i, i, -B.get(i, k));
}
// Matrix X_new = V_inv.multiply(B, ex).multiply(X, ex);
B.multiply(X, X_new, ParallelUtils.CACHED_THREAD_POOL);
X_new.mutableMultiply(1.0/N);
X_new.copyTo(X);
X_rowCache.clear();
X_rowCache.addAll(embedMetric.getAccelerationCache(X_views, parallel));
double newStress = stress(X_views, X_rowCache, delta, parallel);
stressChange = Math.abs(oldStress-newStress);
oldStress = newStress;
}
SimpleDataSet sds = new SimpleDataSet(targetSize, new CategoricalData[0]);
for(Vec v : X_views)
sds.add(new DataPoint(v));
return sds;
}
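    /*
     * Sketch of the update performed by the loop above (the Guttman/SMACOF
     * majorization step for unweighted stress): with B(X) defined by
     *
     *   b_ij = -delta_ij / d_ij(X)  for i != j (set to 0 when d_ij is ~0)
     *   b_ii = -sum_{k != i} b_ik
     *
     * the embedding is replaced by X <- (1/N) * B(X) * X, repeated until the
     * change in stress falls below the tolerance.
     */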
private static double stress(final List<Vec> X_views, final List<Double> X_rowCache, final Matrix delta, boolean parallel)
{
return ParallelUtils.run(parallel, delta.rows(), (i)->
{
double localStress = 0;
for(int j = i+1; j < delta.rows(); j++)
{
double tmp = embedMetric.dist(i, j, X_views, X_rowCache)-delta.get(i, j);
localStress += tmp*tmp;
}
return localStress;
}, (a,b)->a+b);
}
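    /*
     * In equation form, the quantity computed above is the raw stress
     *
     *   stress(X) = sum_{i < j} ( d(x_i, x_j) - delta_ij )^2
     *
     * where d is the embedding metric and delta is the given dissimilarity
     * matrix.
     */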
@Override
public int getTargetDimension()
{
return targetSize;
}
@Override
public boolean setTargetDimension(int target)
{
if(target < 1)
return false;
this.targetSize = target;
return true;
}
}
| 8,931 | 33.620155 | 141 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/visualization/TSNE.java | /*
* Copyright (C) 2015 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.datatransform.visualization;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.datatransform.*;
import jsat.distributions.Normal;
import jsat.linear.*;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.vectorcollection.DefaultVectorCollection;
import jsat.linear.vectorcollection.VectorCollection;
import jsat.math.FastMath;
import jsat.math.optimization.stochastic.*;
import jsat.math.rootfinding.Zeroin;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.AtomicDouble;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* t-distributed Stochastic Neighbor Embedding is an algorithm for creating low
* dimensional embeddings of datasets, for the purpose of visualization. It
* attempts to keep points that are near each other in the original space near
* each other in the low dimensional space as well, with less emphasis on
* maintaining far-away relationships in the data. This implementation uses the
* approximated gradients to learn the embedding in O(n log n) time.<br>
* <br>
* If the input dataset has a dimension greater than 50, it is advisable to
* project the data set down to 50 dimensions using {@link PCA} or some similar
* technique.<br>
* <br>
* See:<br>
* <ul>
* <li>Maaten, L. Van Der, & Hinton, G. (2008). <i>Visualizing Data using
* t-SNE</i>. Journal of Machine Learning Research, 9, 2579–2605.</li>
* <li>Van der Maaten, L. (2014). <i>Accelerating t-SNE using Tree-Based
* Algorithms</i>. Journal of Machine Learning Research, 15, 3221–3245.
* Retrieved from
* <a href="http://jmlr.org/papers/v15/vandermaaten14a.html">here</a></li>
* </ul>
*
* @author Edward Raff
*/
public class TSNE implements VisualizationTransform
{
private double alpha = 4;
private double exageratedPortion = 0.25;
private DistanceMetric dm = new EuclideanDistance();
private int T = 1000;
private double perplexity = 30;
private double theta = 0.5;
/**
* The target embedding dimension, hard coded to 2 for now
*/
private int s = 2;
/**
* α is the "early exaggeration" constant. It is a multiple applied to
     * part of the gradient for the first quarter of iterations, and can improve
* the quality of the solution found. A value in the range of [4, 20] is
* recommended.
*
* @param alpha the exaggeration constant
*/
public void setAlpha(double alpha)
{
if(alpha <= 0 || Double.isNaN(alpha) || Double.isInfinite(alpha))
throw new IllegalArgumentException("alpha must be positive, not " + alpha);
this.alpha = alpha;
}
/**
*
* @return the "early exaggeration" constant
*/
public double getAlpha()
{
return alpha;
}
/**
* Sets the target perplexity of the gaussian used over each data point. The
* perplexity can be thought of as a quasi desired number of nearest
* neighbors to be considered, but is adapted based on the distribution of
* the data. Increasing the perplexity can increase the amount of time it
* takes to get an embedding. Using a value in the range of [5, 50] is
* recommended.
*
* @param perplexity the quasi number of neighbors to consider for each data point
*/
public void setPerplexity(double perplexity)
{
if(perplexity <= 0 || Double.isNaN(perplexity) || Double.isInfinite(perplexity))
throw new IllegalArgumentException("perplexity must be positive, not " + perplexity);
this.perplexity = perplexity;
}
/**
*
* @return the target perplexity to use for each data point
*/
public double getPerplexity()
{
return perplexity;
}
/**
* Sets the desired number of gradient descent iterations to perform.
* @param T the number of gradient descent iterations
*/
public void setIterations(int T)
{
if(T <= 1)
throw new IllegalArgumentException("number of iterations must be positive, not " + T);
this.T = T;
}
/**
*
* @return the number of gradient descent iterations to perform
*/
public int getIterations()
{
return T;
}
@Override
public <Type extends DataSet> Type transform(DataSet<Type> d, boolean parallel)
{
Random rand = RandomUtil.getRandom();
final int N = d.size();
//If perp set too big, the search size would be larger than the dataset size. So min to N
/**
* form sec 4.1: "we compute the sparse approximation by finding the
* floor(3u) nearest neighbors of each of the N input objects (recall
* that u is the perplexity of the conditional distributions)"
*/
final int knn = (int) Math.min(Math.floor(3*perplexity), N-1);
/**
* P_ij does not change at this point, so lets compute these values only
* once please! j index matches up to the value stored in nearMe
*/
final double[][] nearMePij = new double[N][knn];
/**
* Each row is the set of 3*u indices returned by the NN search
*/
final int[][] nearMe = new int[N][knn];
computeP(d, parallel, rand, knn, nearMe, nearMePij, dm, perplexity);
Normal normalDIst = new Normal(0, 1e-4);
/**
         * For now store all data in a 2d array to avoid excessive overhead / cache misses
*/
final double[] y = normalDIst.sample(N*s, rand);
final double[] y_grad = new double[y.length];
        //vec wrapped version for convenience
final Vec y_vec = DenseVector.toDenseVec(y);
final Vec y_grad_vec = DenseVector.toDenseVec(y_grad);
GradientUpdater gradUpdater = new Adam();
gradUpdater.setup(y.length);
for (int iter = 0; iter < T; iter++)//optimization
{
final int ITER = iter;
Arrays.fill(y_grad, 0);
//First loop for the F_rep forces, we do this first to normalize so we can use 1 work space for the gradient
final Quadtree qt = new Quadtree(y);
//TODO might not result in even load
double Z = ParallelUtils.run(parallel, N, (start, end)->
{
double[] workSpace = new double[s];
double local_Z = 0;
for (int i = start; i < end; i ++)
{
Arrays.fill(workSpace, 0.0);
local_Z += computeF_rep(qt.root, i, y, workSpace);
//should be multiplied by 4, rolling it into the normalization by Z after
for (int k = 0; k < s; k++)
inc_z_ij(workSpace[k], i, k, y_grad, s);
}
return local_Z;
}, (a,b)->a+b);
//normalize by Z
final double zNorm = 4.0/(Z+1e-13);
for(int i = 0; i < y.length; i++)
y_grad[i] *= zNorm;
//This second loops computes the F_attr forces
final CountDownLatch latch_g1 = new CountDownLatch(SystemInfo.LogicalCores);
ParallelUtils.run(parallel, N, (start,end)->
{
for (int i = start; i < end; i++)//N
{
for(int j_indx = 0; j_indx < knn; j_indx ++) //O(u)
{
int j = nearMe[i][j_indx];
if(i == j)//this should never happen b/c we skipped that when creating nearMe
continue;
double pij = nearMePij[i][j_indx];
if(ITER < T*exageratedPortion)
pij *= alpha;
double cnst = pij*q_ijZ(i, j, y, s)*4;
for(int k = 0; k < s; k++)
{
double diff = z_ij(i, k, y, s)-z_ij(j, k, y, s);
inc_z_ij(cnst*diff, i, k, y_grad, s);
}
}
}
});
//now we have accumulated all gradients
double eta = 200;
gradUpdater.update(y_vec, y_grad_vec, eta);
}
DataSet<Type> transformed = d.shallowClone();
final IdentityHashMap<DataPoint, Integer> indexMap = new IdentityHashMap<>(N);
for(int i = 0; i < N; i++)
indexMap.put(d.getDataPoint(i), i);
transformed.applyTransform(new DataTransform()
{
@Override
public DataPoint transform(DataPoint dp)
{
int i = indexMap.get(dp);
DenseVector dv = new DenseVector(s);
for(int k = 0; k < s; k++)
dv.set(k, y[i*2+k]);
return new DataPoint(dv, dp.getCategoricalValues(), dp.getCategoricalData());
}
@Override
public void fit(DataSet data)
{
}
@Override
public DataTransform clone()
{
return this;
}
});
return (Type) transformed;
}
/**
*
* @param d the dataset to search
* @param parallel {@code true} if computation should be done with multiple threads, {@code false} for single threaded
* @param rand source of randomness
* @param knn the number of neighbors to search for
* @param nearMe each row is the set of knn indices returned by the NN search
* @param nearMePij the symmetrized neighbor probability
* @param dm the distance metric to use for determining closeness
* @param perplexity the perplexity value for the effective nearest neighbor search and weighting
*/
protected static void computeP(DataSet d, boolean parallel, Random rand, final int knn, final int[][] nearMe, final double[][] nearMePij, final DistanceMetric dm, final double perplexity)
{
@SuppressWarnings("unchecked")
final List<Vec> vecs = d.getDataVectors();
final List<Double> accelCache = dm.getAccelerationCache(vecs, parallel);
final int N = vecs.size();
final VectorCollection<Vec> vc = new DefaultVectorCollection<>();
vc.build(parallel, vecs, dm);
final List<List<? extends VecPaired<Vec, Double>>> neighbors = new ArrayList<>(N);
for(int i = 0; i < N; i++)
neighbors.add(null);
        //new scope b/c I don't want to leak the silly vecIndex thing
{
//Used to map vecs back to their index so we can store only the ones we need in nearMe
final IdentityHashMap<Vec, Integer> vecIndex = new IdentityHashMap<>(N);
for(int i = 0; i < N; i++)
vecIndex.put(vecs.get(i), i);
ParallelUtils.run(parallel, N, (i)->
{
Vec x_i = vecs.get(i);
List<? extends VecPaired<Vec, Double>> closest = vc.search(x_i, knn+1);//+1 b/c self is closest
neighbors.set(i, closest);
for (int j = 1; j < closest.size(); j++)
{
nearMe[i][j - 1] = vecIndex.get(closest.get(j).getVector());
}
});
}
//Now lets figure out everyone's sigmas
final double[] sigma = new double[N];
final AtomicDouble minSigma = new AtomicDouble(Double.POSITIVE_INFINITY);
final AtomicDouble maxSigma = new AtomicDouble(0);
for(int i = 0; i < N; i++)//first lets figure out a min/max range
{
List<? extends VecPaired<Vec, Double>> n_i = neighbors.get(i);
double min = n_i.get(1).getPair();
double max = n_i.get(Math.min(knn, n_i.size()-1)).getPair();
            minSigma.set(Math.min(minSigma.get(), Math.max(min, 1e-9)));//avoid setting 0 as min
maxSigma.set(Math.max(maxSigma.get(), max));
}
//now compute the bandwidth for each datum
ParallelUtils.run(parallel, N, (i)->
{
boolean tryAgain = false;
do
{
tryAgain = false;
try
{
double sigma_i = Zeroin.root(1e-2, 100, minSigma.get(), maxSigma.get(),
(double x) -> perp(i, nearMe, x, neighbors, vecs, accelCache, dm) - perplexity);
sigma[i] = sigma_i;
}
catch (ArithmeticException exception)//perp not in search range?
{
if(maxSigma.get() >= Double.MAX_VALUE/2)
{
//Why can't we find a range that fits? Just pick a value..
                        //Not max value, but data is small.. so let's just set something to break the loop
sigma[i] = 1e100;
}
else
{
tryAgain = true;
minSigma.set(Math.max(minSigma.get() / 2, 1e-6));
maxSigma.set(Math.min(maxSigma.get() * 2, Double.MAX_VALUE / 2));
}
}
}
while (tryAgain);
});
ParallelUtils.run(parallel, N, (i)->
{
for(int j_indx = 0; j_indx < knn; j_indx++)
{
int j = nearMe[i][j_indx];
nearMePij[i][j_indx] = p_ij(i, j, sigma[i], sigma[j], neighbors, vecs, accelCache, dm);
}
});
}
/**
*
* @param node the node to begin computing from
* @param i
* @param z
* @param workSpace the indicies are the accumulated contribution to the
* gradient sans multiplicative terms in the first 2 indices.
* @return the contribution to the normalizing constant Z
*/
private double computeF_rep(Quadtree.Node node, int i, double[] z, double[] workSpace)
{
if(node == null || node.N_cell == 0 || node.indx == i)
return 0;
/*
* Original paper says to use the diagonal divided by the squared 2
         * norm. This doesn't seem to work at all. Tried some different ideas
* with 0.5 as the threshold until I found one that worked.
* Squaring the values would normally not be helpful, but since we are working with tiny values it makes them smaller, making it easier to hit the go
*/
double x = z[i*2];
double y = z[i*2+1];
// double r_cell = node.diagLen();
double r_cell = Math.max(node.maxX-node.minX, node.maxY-node.minY);
r_cell*=r_cell;
double mass_x = node.x_mass/node.N_cell;
double mass_y = node.y_mass/node.N_cell;
double dot = (mass_x-x)*(mass_x-x)+(mass_y-y)*(mass_y-y);
if(node.NW == null || r_cell < theta*dot)//good enough!
{
if(node.indx == i)
return 0;
double Z = 1.0/(1.0 + dot);
double q_cell_Z_sqrd = -node.N_cell*(Z*Z);
workSpace[0] += q_cell_Z_sqrd*(x-mass_x);
workSpace[1] += q_cell_Z_sqrd*(y-mass_y);
return Z*node.N_cell;
}
else//further subdivide
{
double Z_sum = 0;
for(Quadtree.Node child : node)
Z_sum += computeF_rep(child, i, z, workSpace);
return Z_sum;
}
}
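    /*
     * Summary of the Barnes-Hut style approximation above: a quadtree cell is
     * treated as a single point at its center of mass whenever
     *
     *   (longest cell side)^2 < theta * ||y_i - center||^2
     *
     * (or the cell is a leaf), contributing N_cell / (1 + ||y_i - center||^2)
     * to the normalizer Z along with the matching repulsive force; otherwise
     * its children are visited recursively.
     */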
/**
*
* @param val the value to add to the array
* @param i the index of the data point to add to
* @param j the dimension index of the embedding
* @param z the storage of the embedded vectors
* @param s the dimension of the embedding
*/
private static void inc_z_ij(double val, int i, int j, double[] z, int s)
{
z[i*s+j] += val;
}
private static double z_ij(int i, int j, double[] z, int s)
{
return z[i*s+j];
}
/**
* Computes the value of q<sub>ij</sub> Z
* @param i
* @param j
* @param z
* @param s
* @return
*/
private static double q_ijZ(int i, int j, double[] z, int s)
{
double denom =1;
for(int k = 0; k < s; k++)
{
double diff = z_ij(i, k, z, s)-z_ij(j, k, z, s);
denom += diff*diff;
}
return 1.0/denom;
}
/**
* Computes p<sub>j|i</sub>
     * @param j the index of the point whose conditional probability is computed
     * @param i the index of the reference point whose bandwidth is used
     * @param sigma the bandwidth sigma_i to use
     * @param neighbors the lists of nearest neighbors for every data point
     * @return the conditional probability p<sub>j|i</sub>
*/
private static double p_j_i(int j, int i, double sigma, List<List<? extends VecPaired<Vec, Double>>> neighbors, List<Vec> vecs, List<Double> accelCache, DistanceMetric dm)
{
/*
* "Because we are only interested in modeling pairwise similarities, we
* set the value of pi|i to zero" from Visualizing Data using t-SNE
*/
if(i == j)
return 0;
        //nearest is self, use that to get indexed values
Vec x_j = neighbors.get(j).get(0).getVector();
// Vec x_i = neighbors.get(i).get(0).getVector();
final double sigmaSqrdInv = 1/(2*(sigma*sigma));
double numer = 0;
double denom = 0;
boolean jIsNearBy = false;
final List<? extends VecPaired<Vec, Double>> neighbors_i = neighbors.get(i);
for (int k = 1; k < neighbors_i.size(); k++)//SUM over k != i
{
VecPaired<Vec, Double> neighbor_ik = neighbors_i.get(k);
final double d_ik = neighbor_ik.getPair();
denom += FastMath.exp(-(d_ik*d_ik)*sigmaSqrdInv);
if(neighbor_ik.getVector() == x_j)//intentionally doing object equals check - should be same object
{
                jIsNearBy = true;//yay, don't have to compute the distance ourselves
numer = FastMath.exp(-(d_ik*d_ik) * sigmaSqrdInv);
}
}
if(!jIsNearBy)
{
double d_ij = dm.dist(i, j, vecs, accelCache);
numer = FastMath.exp(-(d_ij*d_ij) * sigmaSqrdInv);
}
return numer/(denom+1e-9);
}
private static double p_ij(int i, int j, double sigma_i, double sigma_j, List<List<? extends VecPaired<Vec, Double>>> neighbors, List<Vec> vecs, List<Double> accelCache, DistanceMetric dm)
{
return (p_j_i(j, i, sigma_i, neighbors, vecs, accelCache, dm)+p_j_i(i, j, sigma_j, neighbors, vecs, accelCache, dm))/(2*neighbors.size());
}
/**
* Computes the perplexity for the specified data point using the given sigma
     * @param i the data point to get the perplexity of
     * @param nearMe the indices of each data point's nearest neighbors
     * @param sigma the bandwidth to use
     * @param neighbors the set of nearest neighbors to consider
     * @param vecs the list of original data vectors
     * @param accelCache the distance metric's acceleration cache
     * @param dm the distance metric in use
     * @return the perplexity 2<sup>H(P<sub>i</sub>)</sup>
*/
private static double perp(int i, int[][] nearMe, double sigma, List<List<? extends VecPaired<Vec, Double>>> neighbors, List<Vec> vecs, List<Double> accelCache, DistanceMetric dm)
{
//section 2 of Maaten, L. Van Der, & Hinton, G. (2008). Visualizing Data using t-SNE. Journal of Machine Learning Research, 9, 2579–2605.
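        /*
         * What the loop below accumulates (sketch): the Shannon entropy of
         * the conditional distribution P_i,
         * H(P_i) = -sum_j p_{j|i} * log2(p_{j|i}),
         * and the returned perplexity is Perp(P_i) = 2^H(P_i).
         */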
double hp = 0;
for(int j_indx =0; j_indx < nearMe[i].length; j_indx++)
{
double p_ji = p_j_i(nearMe[i][j_indx], i, sigma, neighbors, vecs, accelCache, dm);
if (p_ji > 0)
hp += p_ji * FastMath.log2(p_ji);
}
hp *= -1;
return FastMath.pow2(hp);
}
private class Quadtree
{
public Node root;
public Quadtree(double[] z )
{
this.root = new Node();
this.root.minX = this.root.minY = Double.POSITIVE_INFINITY;
this.root.maxX = this.root.maxY = -Double.POSITIVE_INFINITY;
for(int i = 0; i < z.length/2; i++)
{
double x = z[i*2];
double y = z[i*2+1];
this.root.minX = Math.min(this.root.minX, x);
this.root.maxX = Math.max(this.root.maxX, x);
this.root.minY = Math.min(this.root.minY, y);
this.root.maxY = Math.max(this.root.maxY, y);
}
//done b/c we have <= on min, so to get the edge we need to be slightly larger
this.root.maxX = Math.nextUp(this.root.maxX);
this.root.maxY = Math.nextUp(this.root.maxY);
            //now we start inserting everything
for(int i = 0; i < z.length/2; i++)
root.insert(1, i, z);
}
private class Node implements Iterable<Node>
{
public int indx;
public double x_mass, y_mass;
public int N_cell;
public double minX, maxX, minY, maxY;
public Node NW, NE, SE, SW;
public Node()
{
indx = -1;
N_cell = 0;
x_mass = y_mass = 0;
NW = NE = SE = SW = null;
}
public Node(double minX, double maxX, double minY, double maxY)
{
this();
this.minX = minX;
this.maxX = maxX;
this.minY = minY;
this.maxY = maxY;
}
public boolean contains(int i, double[]z)
{
double x = z[i*2];
double y = z[i*2+1];
return minX <= x && x < maxX && minY <= y && y < maxY;
}
public void insert(int weight, int i, double[] z)
{
x_mass += z[i*2];
y_mass += z[i*2+1];
N_cell+=weight;
                if(NW == null && indx < 0)//was empty, just set
indx = i;
else
{
if(indx >=0)
{
if(Math.abs(z[indx*2]- z[i*2]) < 1e-13 &&
Math.abs(z[indx*2+1]- z[i*2+1]) < 1e-13)
{
                        //nearly the exact same value
                        //just let the increased local weight indicate a "heavier" leaf
return;
}
}
if(NW == null)//we need to split
{
double w2 = (maxX-minX)/2;
double h2 = (maxY - minY)/2;
NW = new Node(minX, minX + w2, minY + h2, maxY);
NE = new Node(minX + w2, maxX, minY + h2, maxY);
SW = new Node(minX, minX + w2, minY, minY + h2);
SE = new Node(minX + w2, maxX, minY, minY + h2);
for(Node child : this)
if(child.contains(this.indx, z))
{
child.insert(this.N_cell, this.indx, z);
break;
}
indx = -1;
}
//and pass this along to our children
for(Node child : this)
if(child.contains(i, z))
{
child.insert(weight, i, z);
break;
}
}
}
public double diagLen()
{
double w = maxX-minX;
double h = maxY-minY;
return Math.sqrt(w*w+h*h);
}
@Override
public Iterator<Node> iterator()
{
if(NW == null)
return Collections.emptyIterator();
else
return Arrays.asList(NW, NE, SW, SE).iterator();
}
}
}
//Current implementation only supports 2D, so hard code it.
@Override
public int getTargetDimension()
{
return 2;
}
@Override
public boolean setTargetDimension(int target)
{
return target == 2;
}
}
| 25,442 | 35.295292 | 192 | java |
JSAT | JSAT-master/JSAT/src/jsat/datatransform/visualization/VisualizationTransform.java | /*
* Copyright (C) 2015 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.datatransform.visualization;
import java.io.Serializable;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.datatransform.DataTransform;
/**
* Visualization Transform is similar to the {@link DataTransform} interface,
 * except it cannot necessarily be applied to new data points. Classes
* implementing this interface are intended to create 2D or 3D versions of a
* dataset that can be visualized easily.<br>
* <br>
* By default, all implementations will create a 2D projection of the data if
* supported.
*
* @author Edward Raff <[email protected]>
*/
public interface VisualizationTransform extends Cloneable, Serializable
{
/**
*
* @return the number of dimensions that a dataset will be embedded down to
*/
public int getTargetDimension();
/**
* Sets the target dimension to embed new dataset to. Many visualization
* methods may only support a target of 2 or 3 dimensions, or only one of
* those options. For that reason a boolean value will be returned
* indicating if the target size was acceptable. If not, no change to the
* object will occur.
*
* @param target the new target dimension size when {@link #transform(jsat.DataSet)
* } is called.
* @return {@code true} if this transform supports that dimension and it was
* set, {@code false} if the target dimension is unsupported and the
* previous value will be used instead.
*/
public boolean setTargetDimension(int target);
/**
* Transforms the given data set, returning a dataset of the same type.
*
* @param <Type> the dataset type
* @param d the data set to transform
* @return the lower dimension dataset for visualization.
*/
default public <Type extends DataSet> Type transform(DataSet<Type> d)
{
return transform(d, false);
}
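    /*
     * Usage sketch (illustrative only; the concrete implementation and the
     * dataset are assumed, not defined in this file):
     *
     * VisualizationTransform vis = ...; // e.g. a t-SNE style implementation
     * vis.setTargetDimension(2); // returns false if 2D is unsupported
     * DataSet embedded = vis.transform(dataSet, true); // parallel transform
     */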
/**
* Transforms the given data set, returning a dataset of the same type.
*
* @param <Type> the dataset type
* @param d the data set to transform
* @param parallel {@code true} if transform should be done in parallel, or
* {@code false} if it should use a single thread.
* @return the lower dimension dataset for visualization.
*/
public <Type extends DataSet> Type transform(DataSet<Type> d, boolean parallel);
}
| 3,104 | 36.409639 | 87 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Beta.java |
package jsat.distributions;
import jsat.linear.Vec;
import static java.lang.Math.*;
import java.util.Random;
import static jsat.math.SpecialMath.*;
/**
*
* @author Edward Raff
*/
public class Beta extends ContinuousDistribution
{
private static final long serialVersionUID = 8001402067928143972L;
double alpha;
double beta;
public Beta(double alpha, double beta)
{
if(alpha <= 0)
throw new ArithmeticException("Alpha must be > 0, not " + alpha);
else if(beta <= 0)
throw new ArithmeticException("Beta must be > 0, not " + beta);
this.alpha = alpha;
this.beta = beta;
}
@Override
public double logPdf(double x)
{
if(x <= 0 || x >= 1)
return -Double.MAX_VALUE;
return (alpha-1)*log(x)+(beta-1)*log(1-x)-lnBeta(alpha, beta);
}
@Override
public double pdf(double x)
{
if(x <= 0)
return 0;
else if(x >= 1)
return 0;
return exp(logPdf(x));
}
@Override
public double cdf(double x)
{
if(x <= 0)
return 0;
else if(x >= 1)
return 1;
return betaIncReg(x, alpha, beta);
}
@Override
public double invCdf(double p)
{
if(p < 0 || p > 1)
throw new ArithmeticException("p must be in the range [0,1], not " + p);
return invBetaIncReg(p, alpha, beta);
}
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return 1;
}
@Override
public String getDistributionName()
{
return "Beta";
}
@Override
public String[] getVariables()
{
return new String[]{"alpha", "beta"};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[]{alpha, beta};
}
@Override
public void setVariable(String var, double value)
{
if (var.equals("alpha"))
if (value > 0)
alpha = value;
else
throw new RuntimeException("Alpha must be > 0, not " + value);
else if (var.equals("beta"))
if (value > 0)
beta = value;
else
throw new RuntimeException("Beta must be > 0, not " + value);
}
@Override
public ContinuousDistribution clone()
{
return new Beta(alpha, beta);
}
@Override
public void setUsingData(Vec data)
{
double mean = data.mean();
double var = data.variance();
//alpha = (mean^2 - mean^3 - mean * var) / var
alpha = (mean*mean-mean*mean*mean-mean*var)/var;
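        //by the same moment matching, beta = alpha*(1-mean)/mean, written below as (alpha-alpha*mean)/mean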
beta = (alpha-alpha*mean)/mean;
}
@Override
public double mean()
{
return alpha/(alpha+beta);
}
@Override
public double median()
{
return invBetaIncReg(0.5, alpha, beta);
}
@Override
public double mode()
{
if(alpha > 1 && beta > 1)
return (alpha-1)/(alpha+beta-2);
else
return Double.NaN;
}
@Override
public double variance()
{
return alpha*beta / (pow(alpha+beta, 2)*(alpha+beta+1));
}
@Override
public double skewness()
{
return 2*(beta-alpha)*sqrt(alpha+beta+1)/((alpha+beta+2)*sqrt(alpha*beta));
}
@Override
public double[] sample(int numSamples, Random rand)
{
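        //Sample via the Gamma identity: if A ~ Gamma(alpha, 1) and B ~ Gamma(beta, 1)
        //are independent, then A/(A+B) ~ Beta(alpha, beta)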
double[] a = new Gamma(alpha, 1.0).sample(numSamples, rand);
double[] b = new Gamma(beta, 1.0).sample(numSamples, rand);
for(int i = 0; i < a.length; i++)
a[i] /= a[i] + b[i];
return a;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(alpha);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(beta);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Beta other = (Beta) obj;
if (Double.doubleToLongBits(alpha) != Double
.doubleToLongBits(other.alpha)) {
return false;
}
if (Double.doubleToLongBits(beta) != Double
.doubleToLongBits(other.beta)) {
return false;
}
return true;
}
}
| 4,676 | 21.485577 | 84 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Cauchy.java |
package jsat.distributions;
import jsat.linear.Vec;
/**
*
* @author Edward Raff
*/
public final class Cauchy extends ContinuousDistribution
{
private static final long serialVersionUID = -5083645002030551206L;
private double location;
private double scale;
public Cauchy(double x0, double y)
{
setScale(y);
setLocation(x0);
}
public Cauchy()
{
this(0, 1);
}
public void setLocation(double x0)
{
this.location = x0;
}
public void setScale(double y)
{
if(y <= 0)
throw new ArithmeticException("The scale parameter must be > 0, not " + y);
this.scale = y;
}
public double getScale()
{
return scale;
}
public double getLocation()
{
return location;
}
@Override
public double pdf(double x)
{
return 1.0 / ( Math.PI*scale* (1 + Math.pow((x-location)/scale, 2)) );
}
@Override
public double cdf(double x)
{
return Math.atan((x-location)/scale)/Math.PI + 0.5;
}
@Override
public double invCdf(double p)
{
return location + scale * Math.tan( Math.PI * (p - 0.5) );
}
@Override
public double min()
{
return Double.NEGATIVE_INFINITY;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "Cauchy";
}
@Override
public String[] getVariables()
{
return new String[] {"x0", "y"};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[] {location, scale};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals("y"))
setScale(value);
else if(var.equals("x0"))
setLocation(value);
}
@Override
public ContinuousDistribution clone()
{
return new Cauchy(location, scale);
}
@Override
public void setUsingData(Vec data)
{
data = data.sortedCopy();
        //approximate the scale y by taking |1st quartile - 3rd quartile|
int n = data.length();
setScale(Math.abs(data.get(n/4) - data.get(3*n/4)));
        //approximate x0 by taking the median value
        //Note: technically, any value is equally likely to be the true median of a Cauchy distribution, so we don't care about the exact median
setLocation(data.get(n/2));
}
/**
* The Cauchy distribution is unique in that it does not have a mean value (undefined).
* @return {@link Double#NaN} since there is no mean value
*/
@Override
public double mean()
{
return Double.NaN;
}
@Override
public double median()
{
return location;
}
@Override
public double mode()
{
return location;
}
/**
* The Cauchy distribution is unique in that it does not have a variance value (undefined).
* @return {@link Double#NaN} since there is no variance value
*/
@Override
public double variance()
{
return Double.NaN;
}
/**
* The Cauchy distribution is unique in that it does not have a standard deviation value (undefined).
* @return {@link Double#NaN} since there is no standard deviation value
*/
@Override
public double standardDeviation()
{
return Double.NaN;
}
@Override
public double skewness()
{
return Double.NaN;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(location);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(scale);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Cauchy other = (Cauchy) obj;
if (Double.doubleToLongBits(location) != Double
.doubleToLongBits(other.location)) {
return false;
}
if (Double.doubleToLongBits(scale) != Double
.doubleToLongBits(other.scale)) {
return false;
}
return true;
}
}
| 4,357 | 19.556604 | 141 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/ChiSquared.java |
package jsat.distributions;
import jsat.linear.Vec;
import static java.lang.Math.*;
import java.util.Random;
import static jsat.math.SpecialMath.*;
/**
*
* @author Edward Raff
*/
public class ChiSquared extends ContinuousDistribution
{
private static final long serialVersionUID = 2446232102260721666L;
double df;//Degrees of freedom
public ChiSquared(double df) {
this.df = df;
}
@Override
public double pdf(double x)
{
if(x <= 0)
return 0;
/*
* df -x
* -- - 1 --
* 2 2
* x e
* -------------
* df
* --
* 2 /df\
* 2 Gamma|--|
* \ 2/
*/
return exp((df/2-1)*log(x)-x/2- (df/2*log(2)+lnGamma(df/2)) );
}
@Override
public double cdf(double x)
{
if(x <= 0)
return 0;
        if(df == 2)//special case with a closed form that is more accurate to compute, we include it b/c df = 2 is not uncommon
return 1-exp(-x/2);
return gammaP(df/2, x/2);
}
@Override
public double invCdf(double p)
{
        if(df == 2)//special case with a closed form that is more accurate to compute, we include it b/c df = 2 is not uncommon
return 2*abs(log(1-p));
return 2* invGammaP(p, df/2);
}
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "Chi^2";
}
@Override
public String[] getVariables()
{
return new String[] {"df"};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[] {df};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals("df"))
df = value;
}
@Override
public ContinuousDistribution clone()
{
return new ChiSquared(df);
}
@Override
public void setUsingData(Vec data)
{
df = ceil(data.variance()/2);
}
@Override
public double mean()
{
return df;
}
@Override
public double median()
{
//2*InvGammaP(df/2,1/2)
return invGammaP(0.5, df/2)*2;
}
@Override
public double mode()
{
return Math.max(df-2, 0.0);
}
@Override
public double variance()
{
return 2 * df;
}
@Override
public double skewness()
{
return sqrt(8/df);
}
@Override
public double[] sample(int numSamples, Random rand)
{
if(df == 2)
return super.sample(numSamples, rand);
        //otherwise, sample from the equivalent Gamma(df/2, 2) distribution
double[] sample = new Gamma(df/2, 2).sample(numSamples, rand);
return sample;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(df);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
ChiSquared other = (ChiSquared) obj;
if (Double.doubleToLongBits(df) != Double.doubleToLongBits(other.df)) {
return false;
}
return true;
}
}
| 3,492 | 17.983696 | 126 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/ContinuousDistribution.java |
package jsat.distributions;
import jsat.linear.Vec;
import jsat.math.integration.Romberg;
import jsat.math.optimization.oned.GoldenSearch;
import jsat.math.rootfinding.Zeroin;
/**
* The ContinuousDistribution represents the contract for a continuous in one
* dimension.<br>
* <br>
* Many of the functions of a Continuous Distribution are implemented by default
* using numerical calculation and integration. For this reason, the base
* implementations may be slower or less accurate than desired - and could
* produce incorrect results for poorly behaved functions or large magnitude
* inputs. These base implementations are provided for easy completeness, but
* may not be appropriate for all methods. If needed, the implementer should
* check if these methods provide the needed level of accuracy and speed.
*
* @author Edward Raff
*/
public abstract class ContinuousDistribution extends Distribution
{
private static final long serialVersionUID = -5079392926462355615L;
/**
     * Computes the log of the Probability Density Function. Note that when the
     * probability is zero, {@link Double#NEGATIVE_INFINITY} would be the true
* value. Instead, this method will always return the negative of
* {@link Double#MAX_VALUE}. This is to avoid propagating bad values through
* computation.
*
* @param x the value to get the log(PDF) of
* @return the value of log(PDF(x))
*/
public double logPdf(double x)
{
double pdf = pdf(x);
if(pdf <= 0)
return -Double.MAX_VALUE;
return Math.log(pdf);
}
/**
* Computes the value of the Probability Density Function (PDF) at the given point
* @param x the value to get the PDF
* @return the PDF(x)
*/
abstract public double pdf(double x);
@Override
public double cdf(double x)
{
double intMin = getIntegrationMin();
return Romberg.romb((z)->this.pdf(z), intMin, x);
}
@Override
public double invCdf(final double p)
{
if (p < 0 || p > 1)
throw new ArithmeticException("Value of p must be in the range [0,1], not " + p);
double a = getIntegrationMin();
double b = getIntegrationMax();
//default case, lets just do a root finding on the CDF for the specific value of p
return Zeroin.root(a, b, (x) -> this.cdf(x) - p);
}
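    /*
     * Illustrative round-trip for the numerical defaults above (a sketch; any
     * concrete subclass relying on the defaults behaves this way):
     *
     * ContinuousDistribution d = ...; // some concrete subclass
     * double x = d.invCdf(0.975); // root of cdf(x) - 0.975 found via Zeroin
     * double p = d.cdf(x); // Romberg integration of the pdf, approximately 0.975
     */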
@Override
public double mean()
{
double intMin = getIntegrationMin();
double intMax = getIntegrationMax();
return Romberg.romb((double x) -> x*pdf(x), intMin, intMax);
}
@Override
public double variance()
{
double intMin = getIntegrationMin();
double intMax = getIntegrationMax();
final double mean = mean();
return Romberg.romb((x)->Math.pow(x-mean, 2)*pdf(x), intMin, intMax);
}
@Override
public double skewness()
{
double intMin = getIntegrationMin();
double intMax = getIntegrationMax();
final double mean = mean();
return Romberg.romb((x)->Math.pow((x-mean), 3)*pdf(x), intMin, intMax)/Math.pow(variance(), 3.0/2);
}
@Override
public double mode()
{
double intMin = getIntegrationMin();
double intMax = getIntegrationMax();
return GoldenSearch.findMin(intMin, intMax, (x)->-pdf(x), 1e-6, 1000);
}
protected double getIntegrationMin()
{
double intMin = min();
if(Double.isInfinite(intMin))
{
intMin = -Double.MAX_VALUE/4;
            //Let's find a suitably small PDF starting value for this
//first, lets take big steps
for(int i = 0; i < 8; i++)
{
double sqrt = Math.sqrt(-intMin);
if(pdf(sqrt) < 1e-5)
intMin = -sqrt;
else
break;//no more big steps
}
//keep going until it looks like we should switch signs
while(pdf(intMin) < 1e-5 && intMin < -0.1)
{
intMin/=2;
}
if(pdf(intMin) < 1e-5)//still?
intMin *=-1;
//ok, search positive... keep multiplying till we get there
while(pdf(intMin) < 1e-5)
{
intMin *= 2;
}
}
return intMin;
}
protected double getIntegrationMax()
{
double intMax = max();
if(Double.isInfinite(intMax))
{
intMax = Double.MAX_VALUE / 4;
            //Let's find a suitably small PDF starting value for this
//first, lets take big steps
for (int i = 0; i < 8; i++)
{
double sqrt = Math.sqrt(intMax);
if (pdf(sqrt) < 1e-5)
intMax = sqrt;
else
break;//no more big steps
}
//keep going until it looks like we should switch signs
while (pdf(intMax) < 1e-5 && intMax > 0.1)
{
intMax /= 2;
}
if (pdf(intMax) < 1e-5)//still?
intMax *= -1;
//ok, search negative... keep multiplying till we get there
while (pdf(intMax) < 1e-5)
{
intMax *= 2;
}
}
return intMax;
}
/**
* The descriptive name of a distribution returns the name of the distribution, followed by the parameters of the distribution and their values.
* @return the name of the distribution that includes parameter values
*/
public String getDescriptiveName()
{
StringBuilder sb = new StringBuilder(getDistributionName());
sb.append("(");
String[] vars = getVariables();
double[] vals = getCurrentVariableValues();
sb.append(vars[0]).append(" = ").append(vals[0]);
for(int i = 1; i < vars.length; i++)
sb.append(", ").append(vars[i]).append(" = ").append(vals[i]);
sb.append(")");
return sb.toString();
}
/**
* Return the name of the distribution.
* @return the name of the distribution.
*/
abstract public String getDistributionName();
/**
* Returns an array, where each value contains the name of a parameter in the distribution.
* The order must always be the same, and match up with the values returned by {@link #getCurrentVariableValues() }
*
* @return a string of the variable names this distribution uses
*/
abstract public String[] getVariables();
/**
* Returns an array, where each value contains the value of a parameter in the distribution.
* The order must always be the same, and match up with the values returned by {@link #getVariables() }
* @return the current values of the parameters used by this distribution, in the same order as their names are returned by {@link #getVariables() }
*/
abstract public double[] getCurrentVariableValues();
/**
* Sets one of the variables of this distribution by the name.
* @param var the variable to set
* @param value the value to set
*/
abstract public void setVariable(String var, double value);
@Override
abstract public ContinuousDistribution clone();
/**
* Attempts to set the variables used by this distribution based on population sample data,
* assuming the sample data is from this type of distribution.
*
* @param data the data to use to attempt to fit against
*/
abstract public void setUsingData(Vec data);
@Override
public String toString()
{
return getDistributionName();
}
}
| 7,869 | 30.230159 | 152 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Distribution.java | /*
* Copyright (C) 2015 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions;
import java.io.Serializable;
import java.util.Random;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.math.Function;
import jsat.math.Function1D;
import jsat.math.rootfinding.Zeroin;
/**
* Base distribution class for distributions that have only one input.
*
* @author Edward Raff
*/
public abstract class Distribution implements Cloneable, Serializable
{
/**
* Computes the value of the Cumulative Density Function (CDF) at the given
* point. The CDF returns a value in the range [0, 1], indicating what
* portion of values occur at or below that point.
*
* @param x the value to get the CDF of
* @return the CDF(x)
*/
abstract public double cdf(double x);
/**
* Computes the inverse Cumulative Density Function (CDF<sup>-1</sup>) at
* the given point. It takes in a value in the range of [0, 1] and returns
* the value x, such that CDF(x) = <tt>p</tt>
*
* @param p the probability value
* @return the value such that the CDF would return <tt>p</tt>
*/
public double invCdf(double p)
{
if (p < 0 || p > 1)
throw new ArithmeticException("Value of p must be in the range [0,1], not " + p);
double a = Double.isInfinite(min()) ? Double.MIN_VALUE : min();
double b = Double.isInfinite(max()) ? Double.MAX_VALUE : max();
//default case, lets just do a root finding on the CDF for the specific value of p
return Zeroin.root(a, b, (x) -> cdf(x) - p);
}
/**
* Computes the mean value of the distribution
*
* @return the mean value of the distribution
*/
abstract public double mean();
/**
* Computes the median value of the distribution
*
* @return the median value of the distribution
*/
public double median()
{
return invCdf(0.5);
}
/**
* Computes the mode of the distribution. Not all distributions have a mode for all parameter values.
* {@link Double#NaN NaN} may be returned if the mode is not defined for the current values of the
* distribution.
*
* @return the mode of the distribution
*/
abstract public double mode();
/**
* Computes the variance of the distribution. Not all distributions have a
* finite variance for all parameter values. {@link Double#NaN NaN} may be
* returned if the variance is not defined for the current values of the distribution.
* {@link Double#POSITIVE_INFINITY Infinity} is a possible value to be returned
* by some distributions.
*
* @return the variance of the distribution.
*/
abstract public double variance();
/**
* Computes the skewness of the distribution. Not all distributions have a
* finite skewness for all parameter values. {@link Double#NaN NaN} may be
* returned if the skewness is not defined for the current values of the distribution.
*
* @return the skewness of the distribution.
*/
abstract public double skewness();
/**
* Computes the standard deviation of the distribution. Not all distributions have a
* finite standard deviation for all parameter values. {@link Double#NaN NaN} may be
* returned if the variance is not defined for the current values of the distribution.
* {@link Double#POSITIVE_INFINITY Infinity} is a possible value to be returned
* by some distributions.
*
* @return the standard deviation of the distribution
*/
public double standardDeviation()
{
return Math.sqrt(variance());
}
/**
* The minimum value for which the {@link #pdf(double) } is meant to return
* a value. Note that {@link Double#NEGATIVE_INFINITY} is a valid return
* value.
*
* @return the minimum value for which the {@link #pdf(double) } is meant to
* return a value.
*/
abstract public double min();
/**
* The maximum value for which the {@link #pdf(double) } is meant to return
* a value. Note that {@link Double#POSITIVE_INFINITY} is a valid return
* value.
*
* @return the maximum value for which the {@link #pdf(double) } is meant to
* return a value.
*/
abstract public double max();
/**
* This method returns a double array containing the values of random samples from this distribution.
*
* @param numSamples the number of random samples to take
* @param rand the source of randomness
* @return an array of the random sample values
*/
public double[] sample(int numSamples, Random rand)
{
double[] samples = new double[numSamples];
for(int i = 0; i < samples.length; i++)
samples[i] = invCdf(rand.nextDouble());
return samples;
}
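    /*
     * The default sampler above is inverse-transform sampling: each draw is
     * invCdf(u) for u ~ Uniform(0,1). Usage sketch:
     *
     * double[] draws = dist.sample(1000, new Random(42));
     */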
/**
* This method returns a double array containing the values of random samples from this distribution.
*
* @param numSamples the number of random samples to take
* @param rand the source of randomness
* @return a vector of the random sample values
*/
public DenseVector sampleVec(int numSamples, Random rand)
{
return DenseVector.toDenseVec(sample(numSamples, rand));
}
@Override
abstract public Distribution clone();
}
| 6,064 | 33.657143 | 106 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/DistributionSearch.java |
package jsat.distributions;
import jsat.distributions.empirical.KernelDensityEstimator;
import jsat.linear.Vec;
import jsat.testing.goodnessoffit.KSTest;
/**
* Provides methods for selecting the distribution that best fits a given data set.
* @author Edward Raff
*/
public class DistributionSearch
{
private static ContinuousDistribution[] possibleDistributions = new ContinuousDistribution[]
{
new Normal(),
new LogNormal(), new Exponential(),
new Gamma(2, 1), new FisherSendor(10, 10), new Weibull(2, 1),
new Uniform(0, 1), new Logistic(3, 2), new MaxwellBoltzmann(),
new Pareto(), new Rayleigh(2)
};
/**
* Searches the distributions that are known for a possible fit, and returns
* what appears to be the best fit.
*
* @param v all the values from a sample
* @return the distribution that provides the best fit to the data that this method could find.
*/
public static ContinuousDistribution getBestDistribution(Vec v)
{
return getBestDistribution(v, possibleDistributions);
}
/**
* Searches the distributions that are known for a possible fit, and returns
* what appears to be the best fit. If no suitable fit can be found, a
* {@link KernelDensityEstimator} is fit to the data.
*
* @param v all the values from a sample
* @param KDECutOff the cut off value used for using the KDE. Should be in
     * the range (0, 1). Values less than zero mean the KDE will never be used,
     * and greater than 1 means the KDE will always be used.
* @return the distribution that provides the best fit to the data that this method could find.
*/
public static ContinuousDistribution getBestDistribution(Vec v, double KDECutOff)
{
return getBestDistribution(v, KDECutOff, possibleDistributions);
}
/**
* Searches the distributions that are given for a possible fit, and returns
* what appears to be the best fit.
*
* @param v all the values from a sample
* @param possibleDistributions the array of distribution to try and fit to the data
* @return the distribution that provides the best fit to the data that this method could find.
*/
public static ContinuousDistribution getBestDistribution(Vec v, ContinuousDistribution... possibleDistributions)
{
return getBestDistribution(v, 0.0, possibleDistributions);
}
/**
* Searches the distributions that are given for a possible fit, and returns
* what appears to be the best fit. If no suitable fit can be found, a
* {@link KernelDensityEstimator} is fit to the data.
*
* @param v all the values from a sample
* @param KDECutOff the cut off value used for using the KDE. Should be in
     * the range (0, 1). Values less than zero mean the KDE will never be used,
     * and greater than 1 means the KDE will always be used.
* @param possibleDistributions the array of distribution to try and fit to the data
* @return the distribution that provides the best fit to the data that this method could find.
*/
public static ContinuousDistribution getBestDistribution(Vec v, double KDECutOff, ContinuousDistribution... possibleDistributions)
{
if(v.length() == 0)
throw new ArithmeticException("Can not fit a distribution to an empty set");
//Thread Safety, clone the possible distributions
ContinuousDistribution[] possDistCopy = new ContinuousDistribution[possibleDistributions.length];
for(int i = 0; i < possibleDistributions.length; i++)
possDistCopy[i] = possibleDistributions[i].clone();
KSTest ksTest = new KSTest(v);
ContinuousDistribution bestDist = null;
double bestProb = 0;
for(ContinuousDistribution cd : possDistCopy)
{
try
{
cd.setUsingData(v);
double prob = ksTest.testDist(cd);
if(prob > bestProb)
{
bestDist = cd;
bestProb = prob;
}
}
catch(Exception ex)
{
}
}
        ///Return the best distribution, or if somehow everything went wrong, a normal distribution
try
{
if(bestProb >= KDECutOff)
return bestDist == null ? new Normal(v.mean(), v.standardDeviation()) : bestDist.clone();
else
return new KernelDensityEstimator(v);
}
        catch (RuntimeException ex)//Most likely occurs if all values are zero
{
if(v.standardDeviation() == 0)
return null;
throw new ArithmeticException("Catistrophic faulure getting a distribution");
}
}
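    /*
     * Usage sketch (the data vector is a placeholder, not defined here):
     *
     * Vec column = ...; // sample values
     * ContinuousDistribution fit = DistributionSearch.getBestDistribution(column);
     * double p = fit.cdf(1.5); // probability of a value <= 1.5 under the fitted distribution
     */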
}
| 4,986 | 37.658915 | 134 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Exponential.java | package jsat.distributions;
import static java.lang.Math.*;
import jsat.linear.Vec;
/**
*
* @author Edward Raff
*/
public class Exponential extends ContinuousDistribution
{
private static final long serialVersionUID = 1675502925386052588L;
private double lambda;
public Exponential()
{
this(1);
}
public Exponential(double lambda)
{
if(lambda <= 0)
throw new RuntimeException("The rate parameter must be greater than zero, not " + lambda);
this.lambda = lambda;
}
@Override
public double logPdf(double x)
{
        if(x < 0)
            return -Double.MAX_VALUE;//probability is 0, so follow the logPdf convention instead of returning log(1)
return log(lambda) + -lambda*x;
}
@Override
public double pdf(double d)
{
if(d < 0)
return 0;
return lambda*exp(-lambda*d);
}
@Override
public double cdf(double d)
{
if(d < 0)
return 0;
return 1-exp(-lambda*d);
}
@Override
public double invCdf(double d)
{
if(d < 0 || d > 1)
throw new ArithmeticException("Inverse CDF only exists on the range [0,1]");
return -log(1-d)/lambda;
}
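    /*
     * Note: since invCdf(u) = -ln(1-u)/lambda, feeding uniform draws through it
     * (as the inherited sample() does) is the standard inverse-transform sampler
     * for the exponential distribution.
     */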
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDescriptiveName()
{
return "Exponential(\u03BB=" + lambda + ")";
}
@Override
public String getDistributionName()
{
return "Exponential";
}
@Override
public String[] getVariables()
{
return new String[] {"\u03BB"};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals("\u03BB"))
{
if (value <= 0)
throw new RuntimeException("The rate parameter must be greater than zero");
lambda = value;
}
}
@Override
public ContinuousDistribution clone()
{
return new Exponential(lambda);
}
@Override
public void setUsingData(Vec data)
{
/**
* mean of an exponential distribution is lambda^-1
*/
lambda = 1/data.mean();
if(lambda <= 0)
lambda = 1;
}
@Override
public double[] getCurrentVariableValues()
{
return new double[] {lambda};
}
@Override
public double mean()
{
return 1/lambda;
}
@Override
public double median()
{
return 1/lambda * log(2);
}
@Override
public double mode()
{
return 0;
}
@Override
public double variance()
{
return pow(lambda, -2);
}
@Override
public double skewness()
{
return 2;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(lambda);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Exponential other = (Exponential) obj;
if (Double.doubleToLongBits(lambda) != Double
.doubleToLongBits(other.lambda)) {
return false;
}
return true;
}
}
| 3,344 | 16.983871 | 102 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/FisherSendor.java |
package jsat.distributions;
import jsat.linear.Vec;
import static java.lang.Math.*;
import static jsat.math.SpecialMath.*;
/**
*
* Also known as the F distribution.
*
* @author Edward Raff
*/
public class FisherSendor extends ContinuousDistribution
{
private static final long serialVersionUID = 7628304882101574242L;
double v1;
double v2;
public FisherSendor(double v1, double v2)
{
if(v1 <= 0)
throw new ArithmeticException("v1 must be > 0 not " + v1 );
if(v2 <= 0)
throw new ArithmeticException("v2 must be > 0 not " + v2 );
this.v1 = v1;
this.v2 = v2;
}
@Override
public double logPdf(double x)
{
        if(x <= 0)
            return -Double.MAX_VALUE;//probability is 0, so follow the logPdf convention instead of returning log(1)
double leftSide = v1/2 * log(v1) + v2/2*log(v2) - lnBeta(v1/2, v2/2);
double rightSide = (v1/2-1)*log(x) - (v1+v2)/2*log(v2+v1*x);
return leftSide+rightSide;
}
@Override
public double pdf(double x)
{
if(x <= 0)
return 0;
return exp(logPdf(x));
}
@Override
public double cdf(double x)
{
if(x <= 0)
return 0;
return betaIncReg(v1*x / (v1*x + v2), v1/2, v2/2);
}
@Override
public double invCdf(double p)
{
if(p < 0 || p > 1)
throw new ArithmeticException("Probability must be in the range [0,1], not" + p);
double u = invBetaIncReg(p, v1/2, v2/2);
return v2*u/(v1*(1-u));
}
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "F";
}
@Override
public String[] getVariables()
{
return new String[]{"v1", "v2"};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[]{v1, v2};
}
@Override
public void setVariable(String var, double value)
{
if (var.equals("v1"))
if (value > 0)
v1 = value;
else
throw new ArithmeticException("v1 must be > 0 not " + value);
else if (var.equals("v2"))
if (value > 0)
v2 = value;
else
throw new ArithmeticException("v2 must be > 0 not " + value );
}
@Override
public ContinuousDistribution clone()
{
return new FisherSendor(v1, v2);
}
@Override
public void setUsingData(Vec data)
{
double mu = data.mean();
//Only true if v2 > 2
double tmp = 2*mu / (-1 + mu);
if(tmp < 2)
{
            return;//We couldn't approximate anything
}
else
{
v2 = tmp;
if(v2 < 4)
                return;//We can't approximate v1
}
//only true if v2 > 4
double v2sqr = v2*v2;
double var = data.variance();
double denom = -2*v2sqr - 16*var + 20*v2*var - 8*v2sqr*var + v2sqr*v2*var;
v1 = 2*(-2*v2sqr + v2sqr*v2)/denom;
}
@Override
public double mean()
{
if(v2 <= 2)
return Double.NaN;
return v2/(v2-2);
}
@Override
public double median()
{
return (v2/v1)*(1.0/invBetaIncReg(0.5, v2/2, v1/2)-1);
}
@Override
public double mode()
{
if(v1 <= 2)
return Double.NaN;
return (v1-2)/v1*v2/(v2+2);
}
@Override
public double variance()
{
if(v2 <= 4)
return Double.NaN;
return 2 * v2*v2*(v1+v2-2) / (v1*pow(v2-2,2)*(v2-4));
}
@Override
public double skewness()
{
        if(v2 <= 6)//Does not have a skewness for v2 <= 6
return Double.NaN;
double num = (2*v1+v2-2)*sqrt(8*(v2-4));
double denom = (v2-6)*sqrt(v1*(v1+v2-2));
return num/denom;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(v1);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(v2);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
FisherSendor other = (FisherSendor) obj;
if (Double.doubleToLongBits(v1) != Double.doubleToLongBits(other.v1)) {
return false;
}
if (Double.doubleToLongBits(v2) != Double.doubleToLongBits(other.v2)) {
return false;
}
return true;
}
}
| 4,813 | 19.930435 | 93 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Gamma.java |
package jsat.distributions;
import jsat.linear.Vec;
import static java.lang.Math.*;
import java.util.Random;
import static jsat.math.SpecialMath.*;
/**
*
* @author Edward Raff
*/
public class Gamma extends ContinuousDistribution
{
private static final long serialVersionUID = 6380493734491674483L;
private double k;
private double theta;
public Gamma(double k, double theta)
{
this.k = k;
this.theta = theta;
}
@Override
public double pdf(double x)
{
if(x < 0)
return 0;
return exp(logPdf(x));
}
@Override
public double logPdf(double x)
{
/*
* k - 1 / -x \
* x exp|-----|
* \theat/
* -----------------
* k
* Gamma(k) theta
*/
double p1 = -k *log(theta);
double p2 = k*log(x);
double p3 = -lnGamma(k);
double p4 = -x/theta;
double p5 = -log(x);
double pdf = p1+p2+p3+p4+p5;
if(Double.isNaN(pdf) || Double.isInfinite(pdf))//Bad extreme values when x is very small
return -Double.MAX_VALUE;
return pdf;
}
@Override
public double cdf(double x)
{
if(x < 0)
throw new ArithmeticException("CDF goes from 0 to Infinity, " + x + " is invalid");
return gammaP(k, x/theta);
}
@Override
public double invCdf(double p)
{
return invGammaP(p, k)*theta;
}
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "Gamma";
}
@Override
public String[] getVariables()
{
return new String[] {"k", "theta"};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[] {k, theta};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals("k"))
k = value;
else if(var.equals("theta"))
theta = value;
}
@Override
public ContinuousDistribution clone()
{
return new Gamma(k, theta);
}
@Override
public void setUsingData(Vec data)
{
/*
* Using:
* mean = k*theat
* variance = k*theta^2
*
* k*theta^2 / (k*theta) = theta^2/theta = theta = mean/variance
*
*/
theta = data.variance()/data.mean();
k = data.mean()/theta;
}
@Override
public double mean()
{
return k * theta;
}
@Override
public double median()
{
return invGammaP(k, 0.5)*theta;
}
@Override
public double mode()
{
if(k < 1)
throw new ArithmeticException("No mode for k < 1");
return (k-1)*theta;
}
@Override
public double variance()
{
return k * theta*theta;
}
@Override
public double skewness()
{
return 2 / sqrt(k);
}
@Override
public double[] sample(int numSamples, Random rand)
{
/**
* See: Marsaglia, George, and Wai Wan Tsang. “A Simple Method for
* Generating Gamma Variables.” ACM Trans. Math. Softw. 26, no. 3
* (September 2000): 363–72. https://doi.org/10.1145/358407.358414.
*/
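        //Sketch of the k >= 1 branch below: with d = k - 1/3 and c = 1/sqrt(9 d),
        //propose v = (1 + c*x)^3 for x ~ N(0,1) and accept theta*d*v when
        //log(u) < x^2/2 + d*(1 - v + log(v)), trying a cheaper squeeze test first.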
double[] toRet = new double[numSamples];
if (k >= 1.0)
{
double d = k - 1.0/3.0;
double c = 1.0/sqrt(9.0*d);
for(int i = 0; i < toRet.length; i++)
{
while(true)
{
double x = 0, xSqrd = 0;
double v = 0;
while (v <= 0.0)
{
x = rand.nextGaussian();
v = 1 + c*x;
}
v = v*v*v;
double u = rand.nextDouble();
xSqrd = x*x;
                    //Squeeze check done first to avoid expensive logs
double squeezeCheck = 1.0 - 0.0331 * xSqrd * xSqrd;
if (u < squeezeCheck)
{
toRet[i] = theta*d*v;
break;
}//fail, now try logs if we must
else if( log(u) < 0.5 * xSqrd + d * (1.0 - v + log(v)))
{
toRet[i] = theta*d*v;
break;
}
}
}
}
else
{
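            //Boosting trick for k < 1: if Y ~ Gamma(k+1, 1) and U ~ Uniform(0,1),
            //then Y * U^(1/k) ~ Gamma(k, 1); the result is scaled by theta below.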
Gamma shifted = new Gamma(k+1, 1.0);
double[] gs = shifted.sample(numSamples, rand);
for(int i = 0; i < toRet.length; i++)
toRet[i] = theta*gs[i]*pow(rand.nextDouble(), 1.0/k);
}
return toRet;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(k);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(theta);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Gamma other = (Gamma) obj;
if (Double.doubleToLongBits(k) != Double.doubleToLongBits(other.k)) {
return false;
}
if (Double.doubleToLongBits(theta) != Double
.doubleToLongBits(other.theta)) {
return false;
}
return true;
}
}
| 5,795 | 21.206897 | 96 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Kolmogorov.java |
package jsat.distributions;
import jsat.linear.Vec;
import jsat.math.Function;
import jsat.math.rootfinding.RiddersMethod;
import static java.lang.Math.*;
import jsat.math.rootfinding.Zeroin;
/**
*
* @author Edward Raff
*/
public class Kolmogorov extends ContinuousDistribution
{
private static final long serialVersionUID = 7319511918364286930L;
public Kolmogorov()
{
}
@Override
public double pdf(double x)
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public double cdf(double x)
{
if(x < 0)
throw new ArithmeticException("Invalid value of x, x must be > 0, not " + x);
else if(x == 0)
return 0;
        else if(x >= 5)//By this point, floating point isn't accurate enough to distinguish between 1.0 and the true value.
return 1;
/*
* Uses 2 formulas, see http://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test#Kolmogorov_distribution
*
         * Each formula converges very rapidly, needing only 3 terms for full
         * IEEE precision for one or the other - the crossover point is 1.18
         * according to Numerical Recipes, 3rd Edition, p. 334-335
*/
double tmp = 0;
double x2 = x*x;
if(x < 1.18)
{
for(int j = 1; j <= 3; j++ )
tmp += exp( -pow(2*j-1,2)*PI*PI / (8*x2) );
return sqrt(2*PI)/x *tmp;
}
else
{
// for(int j = 1; j <= 3; j++ )
// tmp += exp(-2*j*j*x*x)*pow(-1,j-1);
            tmp = exp(-2*x2) + exp(-18*x2) - exp(-8*x2);//In order of 1st, 3rd, and 2nd to reduce chances of cancellation
return 1 - 2*tmp;
}
}
@Override
public double invCdf(double p)
{
// return RiddersMethod.root(0, 5, fCDF, p, p);
return Zeroin.root(0.0, 5.0, (x) -> this.cdf(x) - p);
}
@Override
public double min()
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public double max()
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public String getDistributionName()
{
return "Kolmogorov";
}
@Override
public String[] getVariables()
{
return new String[]{};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[]{};
}
@Override
public void setVariable(String var, double value)
{
}
@Override
public ContinuousDistribution clone()
{
return new Kolmogorov();
}
@Override
public void setUsingData(Vec data)
{
}
@Override
public double mean()
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public double median()
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public double mode()
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public double variance()
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public double skewness()
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public int hashCode() {
return 31;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
return true;
}
}
| 3,723 | 20.402299 | 122 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Kumaraswamy.java | /*
* Copyright (C) 2018 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions;
import jsat.linear.Vec;
import static java.lang.Math.*;
import static jsat.math.SpecialMath.*;
/**
*
* @author Edward Raff
*/
public class Kumaraswamy extends ContinuousDistribution
{
double a;
double b;
public Kumaraswamy(double a, double b)
{
this.a = a;
this.b = b;
}
public Kumaraswamy()
{
this(1, 1);
}
@Override
public double logPdf(double x)
{
if(x <= 0 || x >= 1)
return -Double.MAX_VALUE;
//Log pdf is : Log[a] + Log[b] + (-1 + a) Log[x] + (-1 + b) Log[1 - x^a]
double log_x = log(x);
return log(a) + log(b) + (a-1)*log_x + (b-1)*log(1-exp(a*log_x));
}
@Override
public double pdf(double x)
{
if(x <= 0 || x >= 1)
return 0;
return exp(logPdf(x));
}
@Override
public double cdf(double x)
{
return 1 - exp(b*log(1-pow(x, a)));
}
@Override
public String getDistributionName()
{
return "Kumaraswamy";
}
@Override
public String[] getVariables()
{
return new String[]{"alpha", "beta"};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[]{a, b};
}
@Override
public void setVariable(String var, double value)
{
if (var.equals("alpha"))
if (value > 0)
a = value;
else
throw new RuntimeException("Alpha must be > 0, not " + value);
else if (var.equals("beta"))
if (value > 0)
b = value;
else
throw new RuntimeException("Beta must be > 0, not " + value);
}
@Override
public double mean()
{
return b*beta(1+1/a, b);
}
@Override
public double variance()
{
return b*beta(1+2/a, b) - pow(b*beta(1+1/a, b), 2);
}
@Override
public double skewness()
{
//see https://stdlib.io/develop/docs/api/@stdlib/math/base/dists/kumaraswamy/skewness/
double m3 = b*beta(1+3/a, b);
double var = variance();
double mean = mean();
return (m3-3*mean*var-pow(mean, 3))/pow(var, 3./2);
}
@Override
public double mode()
{
if(a < 1 || b < 1 || (a == 1 && a == b))
            return Double.NaN;//No mode exists
return pow((a-1)/(a*b-1), 1/a);
}
@Override
public Kumaraswamy clone()
{
return new Kumaraswamy(a, b);
}
@Override
public void setUsingData(Vec data)
{
        throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return 1;
}
}
| 3,574 | 21.34375 | 135 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Laplace.java |
package jsat.distributions;
import jsat.linear.Vec;
import jsat.text.GreekLetters;
import static java.lang.Math.*;
/**
*
* @author Edward Raff
*/
public final class Laplace extends ContinuousDistribution
{
private static final long serialVersionUID = -4799360517803678236L;
/**
* location
*/
private double mu;
/*
* Scale
*/
private double b;
public Laplace(double mu, double b)
{
setB(b);
setMu(mu);
}
public void setMu(double mu)
{
this.mu = mu;
}
public double getMu()
{
return mu;
}
public void setB(double b)
{
if (b <= 0)
throw new ArithmeticException("The scale parameter must be > 0");
this.b = b;
}
public double getB()
{
return b;
}
@Override
public double pdf(double x)
{
return 1/(2*b)*exp(-abs(x-mu)/b);
}
@Override
public double cdf(double x)
{
double xMu = x - mu;
        return 0.5 * (1 + signum(xMu)*(1-exp(-abs(xMu)/b)) );//the sign must be taken of (x - mu), not x
}
@Override
public double invCdf(double p)
{
return mu - b * signum(p - 0.5) * log(1-2*abs(p-0.5));
}
@Override
public double min()
{
return Double.NEGATIVE_INFINITY;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "Laplace";
}
@Override
public String[] getVariables()
{
return new String[] {GreekLetters.mu, "b"};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[] {mu, b};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals(GreekLetters.mu))
setMu(value);
else if(var.equals("b"))
setB(value);
}
@Override
public ContinuousDistribution clone()
{
return new Laplace(mu, b);
}
@Override
public void setUsingData(Vec data)
{
        //Don't set mu yet in case b turns out to be a bad value
double tmpMu = data.mean();
double newB = 0;
        //TODO add APIs so that sparse vectors can do this more efficiently
for(int i = 0; i < data.length(); i++)
newB += abs(data.get(i) - tmpMu);
newB /= data.length();
setB(newB);
setMu(tmpMu);
}
@Override
public double mean()
{
return mu;
}
@Override
public double median()
{
return mu;
}
@Override
public double mode()
{
return mu;
}
@Override
public double variance()
{
return 2 * b*b;
}
@Override
public double skewness()
{
return 0;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(b);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(mu);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Laplace other = (Laplace) obj;
if (Double.doubleToLongBits(b) != Double.doubleToLongBits(other.b)) {
return false;
}
if (Double.doubleToLongBits(mu) != Double.doubleToLongBits(other.mu)) {
return false;
}
return true;
}
}
| 3,589 | 16.598039 | 77 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Levy.java |
package jsat.distributions;
import static java.lang.Math.*;
import jsat.linear.Vec;
import jsat.math.SpecialMath;
/**
* Implementation of the
* <a href="http://en.wikipedia.org/wiki/L%C3%A9vy_distribution">Levy
 * distribution</a>
*
* @author Edward Raff
*/
public class Levy extends ContinuousDistribution
{
private static final long serialVersionUID = 3132169946527422816L;
private double location;
private double scale;
private double logScale;
public Levy(double scale, double location)
{
setScale(scale);
setLocation(location);
}
/**
* Sets the scale of the Levy distribution
* @param scale the new scale value, must be positive
*/
public void setScale(double scale)
{
if(scale <= 0 || Double.isNaN(scale) || Double.isInfinite(scale))
throw new ArithmeticException("Scale must be a positive value, not " + scale);
this.scale = scale;
this.logScale = log(scale);
}
/**
* Returns the scale parameter used by this distribution
* @return the scale parameter
*/
public double getScale()
{
return scale;
}
/**
* Sets location of the Levy distribution.
* @param location the new location
*/
public void setLocation(double location)
{
if(Double.isNaN(location) || Double.isInfinite(location))
throw new ArithmeticException("location must be a real number");
this.location = location;
}
/**
* Returns the location parameter used by this distribution.
     * @return the location parameter
*/
public double getLocation()
{
return location;
}
@Override
public double pdf(double x)
{
if(x < location)
return 0;
return exp(logPdf(x));
}
@Override
public double logPdf(double x)
{
if(x < location)
return Double.NEGATIVE_INFINITY;
final double mu = x-location;
return -(-mu*logScale+scale+3*mu*log(mu)+mu*log(PI)+mu*log(2))/(2*mu);
}
@Override
public double cdf(double x)
{
if(x < location)
return 0;
return SpecialMath.erfc(sqrt(scale/(2*(x-location))));
}
@Override
public double invCdf(double p)
{
if(p < 0 || p > 1)
throw new ArithmeticException("Invalid probability " + p);
return scale/(2*pow(SpecialMath.invErfc(p), 2))+location;
}
@Override
public double min()
{
return location;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "Levy";
}
@Override
public String[] getVariables()
{
return new String[]{"Scale", "Location"};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[] {scale, location};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals(getVariables()[0]))
setScale(value);
else if(var.equals(getVariables()[1]))
setLocation(value);
}
@Override
public Levy clone()
{
return new Levy(scale, location);
}
@Override
public void setUsingData(Vec data)
{
setLocation(data.min());
setScale(2*pow(SpecialMath.invErfc(0.5), 2)*(data.median()-location));
}
@Override
public double mean()
{
return Double.POSITIVE_INFINITY;
}
@Override
public double mode()
{
return scale/3+location;
}
@Override
public double standardDeviation()
{
return Double.POSITIVE_INFINITY;
}
@Override
public double variance()
{
return Double.POSITIVE_INFINITY;
}
@Override
public double skewness()
{
return Double.NaN;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(location);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(scale);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Levy other = (Levy) obj;
if (Double.doubleToLongBits(location) != Double
.doubleToLongBits(other.location)) {
return false;
}
if (Double.doubleToLongBits(scale) != Double
.doubleToLongBits(other.scale)) {
return false;
}
return true;
}
}
| 4,731 | 20.219731 | 90 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/LogNormal.java |
package jsat.distributions;
import jsat.linear.Vec;
import static java.lang.Math.*;
import static jsat.math.SpecialMath.*;
import jsat.text.GreekLetters;
/**
*
* @author Edward Raff
*/
public class LogNormal extends ContinuousDistribution
{
private static final long serialVersionUID = -6938582328705527274L;
double mu;
double sig;
public LogNormal()
{
this(0, 1);
}
public LogNormal(double mu, double sig)
{
this.mu = mu;
this.sig = sig;
}
@Override
public double pdf(double x)
{
if(x <= 0)
return 0;
double num = exp(-pow(log(x)-mu, 2)/(2*sig*sig));
double denom = x*sqrt(2*PI*sig*sig);
return num/denom;
}
@Override
public double cdf(double x)
{
if(x <= 0)
return 0;
return 0.5 + 0.5*erf( (log(x)-mu)/sqrt(2*sig*sig) );
}
@Override
public double invCdf(double p)
{
double expo = mu+sqrt(2)*sqrt(sig*sig)*invErf(2*p-1.0);
return exp(expo);
}
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "LogNormal";
}
@Override
public String[] getVariables()
{
return new String[]{GreekLetters.mu , GreekLetters.sigma};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[]{mu, sig};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals(GreekLetters.mu))
mu = value;
else if(var.equals(GreekLetters.sigma))
if(value <= 0)
throw new ArithmeticException("Standard deviation must be > 0, not " + value );
else
sig = value;
}
@Override
public ContinuousDistribution clone()
{
return new LogNormal(mu, sig);
}
@Override
public void setUsingData(Vec data)
{
double mean = data.mean();
double var = data.variance();
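        //Moment matching: E[X] = exp(mu + sig^2/2) and Var[X]/E[X]^2 = exp(sig^2) - 1,
        //so sig^2 = log(1 + var/mean^2) and mu = log(mean) - sig^2/2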
mu = log(mean) - 0.5*log(1 + var/(mean*mean));
        sig = sqrt(log(1 + var/(mean*mean)));
}
@Override
public double mean()
{
return exp(mu + sig*sig*0.5);
}
@Override
public double median()
{
return exp(mu);
}
@Override
public double mode()
{
return exp(mu-sig*sig);
}
@Override
public double variance()
{
return expm1(sig*sig)*exp(2*mu+sig*sig);
}
@Override
public double skewness()
{
return (exp(sig*sig)+2)*sqrt(expm1(sig*sig));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(mu);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(sig);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
LogNormal other = (LogNormal) obj;
if (Double.doubleToLongBits(mu) != Double.doubleToLongBits(other.mu)) {
return false;
}
if (Double.doubleToLongBits(sig) != Double.doubleToLongBits(other.sig)) {
return false;
}
return true;
}
}
| 3,495 | 18.103825 | 95 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/LogUniform.java | /*
* Copyright (C) 2015 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions;
import jsat.linear.Vec;
/**
* The Log Uniform distribution is such that if X is the distribution, then Y =
* log(X) is uniformly distributed. Because of this log term, this distribution
* can only take values in a positive range.
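 * <br><br>
 * A minimal usage sketch: {@code new LogUniform(1e-3, 1e3)} places equal
 * probability mass on each decade between 1e-3 and 1e3.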
*
* @author Edward Raff <[email protected]>
*/
public class LogUniform extends ContinuousDistribution
{
private double min, max;
private double logMin, logMax;
private double logDiff;
private double diff;
/**
* Creates a new Log Uniform distribution between 1e-2 and 1
*/
public LogUniform()
{
this(1e-2, 1);
}
/**
* Creates a new Log Uniform distribution
*
* @param min the minimum value to be returned by this distribution
* @param max the maximum value to be returned by this distribution
*/
public LogUniform(double min, double max)
{
setMinMax(min, max);
}
/**
* Sets the minimum and maximum values for this distribution
* @param min the minimum value, must be positive
* @param max the maximum value, must be larger than {@code min}
*/
public void setMinMax(double min, double max)
{
if(min <= 0 || Double.isNaN(min) || Double.isInfinite(min))
throw new IllegalArgumentException("min value must be positive, not " + min);
else if(min >= max || Double.isNaN(max) || Double.isInfinite(max))
throw new IllegalArgumentException("max (" + max + ") must be larger than min (" + min+")" );
this.max = max;
this.min = min;
this.logMax = Math.log(max);
this.logMin = Math.log(min);
this.logDiff = logMax-logMin;
this.diff = max-min;
}
@Override
public double pdf(double x)
{
if(x < min)
return 0;
else if(x > max)
return 0;
else
return 1.0/(x*(logMax-logMin));
}
@Override
public String getDistributionName()
{
return "LogUniform";
}
@Override
public String[] getVariables()
{
return new String[]{"min", "max"};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[]{min, max};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals("min"))
setMinMax(value, max);
else if(var.equals("max"))
setMinMax(min, value);
}
@Override
public LogUniform clone()
{
return new LogUniform(min, max);
}
@Override
public void setUsingData(Vec data)
{
//probably could do way better, but whatever
double guessMin = data.min();
double guessMax = data.max();
setMinMax(Math.max(guessMin, 1e-10), guessMax);
}
@Override
public double cdf(double x)
{
if(x < min)
return 0;
else if(x > max)
return 1;
else
return (Math.log(x)-logMin)/(logDiff);
}
@Override
public double invCdf(double p)
{
if(p < 0 || p > 1 || Double.isNaN(p))
throw new IllegalArgumentException("p must be in [0,1], not " + p);
return Math.exp(p*logMax-p*logMin)*min;
}
@Override
public double mean()
{
return (diff)/(logDiff);
}
@Override
public double median()
{
return Math.sqrt(min)*Math.sqrt(max);
}
@Override
public double mode()
{
return min();
}
@Override
public double variance()
{
return (max*max-min*min)/(2*logDiff) - diff*diff/(logDiff*logDiff);
}
@Override
public double skewness()
{
return Double.NaN;//TODO derive
}
@Override
public double min()
{
return min;
}
@Override
public double max()
{
return max;
}
}
| 4,618 | 23.569149 | 105 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Logistic.java |
package jsat.distributions;
import jsat.linear.Vec;
import jsat.math.TrigMath;
import jsat.text.GreekLetters;
/**
*
* @author Edward Raff
*/
public final class Logistic extends ContinuousDistribution
{
private static final long serialVersionUID = -8720773286818833591L;
/**
* Location
*/
private double mu;
/**
* Scale
*/
private double s;
public Logistic(double mu, double s)
{
this.mu = mu;
setS(s);
}
public double getS()
{
return s;
}
public double getMu()
{
return mu;
}
public void setMu(double mu)
{
this.mu = mu;
}
public void setS(double s)
{
if(s <= 0)
throw new ArithmeticException("The scale parameter must be > 0, not " + s);
this.s = s;
}
@Override
public double pdf(double x)
{
return 1/(4*s) * Math.pow(TrigMath.sech( (x-mu) / (2*s)), 2);
}
@Override
public double cdf(double x)
{
return 0.5 + 0.5 * Math.tanh( (x-mu)/(2*s));
}
@Override
public double invCdf(double p)
{
return mu + s * Math.log( p /(1-p));
}
@Override
public double min()
{
return Double.NEGATIVE_INFINITY;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "Logistic";
}
@Override
public String[] getVariables()
{
return new String[] {GreekLetters.mu, "s"};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[]{mu, s};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals(GreekLetters.mu))
setMu(value);
else if(var.equals("s"))
setS(value);
}
@Override
public ContinuousDistribution clone()
{
return new Logistic(mu, s);
}
@Override
public void setUsingData(Vec data)
{
double newS = data.variance()*(3/(Math.PI*Math.PI));
newS = Math.sqrt(newS);
setS(newS);
setMu(data.mean());
}
@Override
public double mean()
{
return mu;
}
@Override
public double median()
{
return mu;
}
@Override
public double mode()
{
return mu;
}
@Override
public double variance()
{
return Math.PI*Math.PI/3*s*s;
}
@Override
public double skewness()
{
return 0;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(mu);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(s);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Logistic other = (Logistic) obj;
if (Double.doubleToLongBits(mu) != Double.doubleToLongBits(other.mu)) {
return false;
}
if (Double.doubleToLongBits(s) != Double.doubleToLongBits(other.s)) {
return false;
}
return true;
}
}
| 3,333 | 16.547368 | 87 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/MaxwellBoltzmann.java |
package jsat.distributions;
import jsat.linear.Vec;
import jsat.text.GreekLetters;
import static java.lang.Math.*;
import static jsat.math.SpecialMath.*;
/**
*
* @author Edward Raff
*/
public class MaxwellBoltzmann extends ContinuousDistribution
{
private static final long serialVersionUID = -8273087046831433430L;
/**
* shape
*/
double sigma;
public MaxwellBoltzmann()
{
this(1);
}
public MaxwellBoltzmann(double sigma)
{
setShape(sigma);
}
final public void setShape(double sigma)
{
if(sigma <= 0 || Double.isInfinite(sigma) || Double.isNaN(sigma))
throw new ArithmeticException("shape parameter must be > 0, not " + sigma);
this.sigma = sigma;
}
@Override
public double logPdf(double x)
{
        if(x <= 0)
            return Double.NEGATIVE_INFINITY;
return (2*log(x) + (-x*x/(2*sigma*sigma)) - 3*log(sigma) )+ 0.5*(log(2)-log(PI));
}
@Override
public double pdf(double x)
{
if(x <= 0)
return 0;
double x2 = x*x;
return sqrt(2/PI)*x2*exp(-x2/(2*sigma*sigma))/(sigma*sigma*sigma);
}
@Override
public double cdf(double x)
{
if(x <=0 )
return 0.0;
return erf(x/(sqrt(2)*sigma))-sqrt(2/PI)*x*exp(-(x*x)/(2*sigma*sigma))/sigma;
}
@Override
public double invCdf(double p)
{
if(p < 0 || p > 1)
throw new ArithmeticException("probability must be in the range [0,1], not " + p);
return sqrt(2)*sigma*sqrt(invGammaP(p, 3.0/2.0));
}
@Override
public double median()
{
return sigma*sqrt(2*invGammaP(1.0/2.0, 3.0/2.0));
}
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "Maxwell–Boltzmann";
}
@Override
public String[] getVariables()
{
return new String[] {GreekLetters.sigma};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[] {sigma};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals(GreekLetters.sigma))
setShape(value);
}
@Override
public ContinuousDistribution clone()
{
return new MaxwellBoltzmann(sigma);
}
@Override
public void setUsingData(Vec data)
{
setShape(data.mean()/sqrt(2));
}
@Override
public double mean()
{
return 2*sqrt(2/PI)*sigma;
}
@Override
public double mode()
{
return sqrt(2)*sigma;
}
@Override
public double variance()
{
return sigma*sigma*(3*PI-8)/PI;
}
@Override
public double skewness()
{
return 2*sqrt(2)*(16-5*PI)/pow(3*PI-8, 3.0/2.0);
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(sigma);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
MaxwellBoltzmann other = (MaxwellBoltzmann) obj;
if (Double.doubleToLongBits(sigma) != Double
.doubleToLongBits(other.sigma)) {
return false;
}
return true;
}
}
| 3,534 | 18.530387 | 94 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Normal.java | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package jsat.distributions;
import static java.lang.Math.*;
import jsat.linear.Vec;
import jsat.text.GreekLetters;
/**
*
* @author Edward Raff
*/
public class Normal extends ContinuousDistribution
{
private static final long serialVersionUID = -5298346576152986165L;
private double mean;
private double stndDev;
public Normal()
{
this(0, 1);
}
public Normal(double mean, double stndDev)
{
if(stndDev <= 0)
throw new RuntimeException("Standerd deviation of the normal distribution needs to be greater than zero");
setMean(mean);
setStndDev(stndDev);
}
public void setMean(double mean)
{
if(Double.isInfinite(mean) || Double.isNaN(mean))
throw new ArithmeticException("Mean can not be infinite of NaN");
this.mean = mean;
}
public void setStndDev(double stndDev)
{
if(Double.isInfinite(stndDev) || Double.isNaN(stndDev))
throw new ArithmeticException("Standard devation can not be infinite of NaN");
if(stndDev <= 0)
throw new ArithmeticException("The standard devation can not be <= 0");
this.stndDev = stndDev;
}
public static double cdf(double x, double mu, double sigma)
{
if (Double.isNaN(x) || Double.isInfinite(x))
throw new ArithmeticException("X is not a real number");
return cdfApproxMarsaglia2004(zTransform(x, mu, sigma));
}
public double cdf(double x)
{
return cdf(x, mean, stndDev);
}
public static double invcdf(double x, double mu, double sigma)
{
if(x < 0 || x > 1)
throw new RuntimeException("Inverse of a probability requires a probablity in the range [0,1], not " + x);
//http://home.online.no/~pjacklam/notes/invnorm/
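        //Acklam's piecewise rational approximation of the standard normal quantile,
        //refined below with a single Halley iteration for near double precision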
double a[] =
{
-3.969683028665376e+01,2.209460984245205e+02,
-2.759285104469687e+02,1.383577518672690e+02,
-3.066479806614716e+01,2.506628277459239e+00
};
double b[] =
{
-5.447609879822406e+01,1.615858368580409e+02,
-1.556989798598866e+02,6.680131188771972e+01,-1.328068155288572e+01
};
double c[] =
{
-7.784894002430293e-03,-3.223964580411365e-01,
-2.400758277161838e+00,-2.549732539343734e+00,
4.374664141464968e+00,2.938163982698783e+00
};
double d[] =
{
7.784695709041462e-03,3.224671290700398e-01,
2.445134137142996e+00,3.754408661907416e+00
};
double p_low = 0.02425;
double p_high = 1 - p_low;
double p = x;
double result;
if(0 < p && p < p_low)
{
double q = sqrt(-2*log(p));
result = (((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) /
((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1);
}
else if (p_low <= p && p <= p_high)
{
double q = p - 0.5;
double r = q*q;
result = (((((a[0]*r+a[1])*r+a[2])*r+a[3])*r+a[4])*r+a[5])*q /
(((((b[0]*r+b[1])*r+b[2])*r+b[3])*r+b[4])*r+1);
}
else//upper region
{
double q = sqrt(-2*log(1-p));
result = -(((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) /
((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1);
}
//Refining step
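        //One step of Halley's method on f(z) = cdf(z) - p, where u = f(z)/pdf(z)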
double e = cdf(result, 0, 1) - p;
double u = e*sqrt(2*PI)*exp(result*result/2);
result = result - u / (1 + result*u/2);
return result * sigma + mu;
}
public double invCdf(double d)
{
return invcdf(d, mean, stndDev);
}
public static double pdf(double x, double mu, double sigma)
{
return 1/sqrt(2*PI*sigma*sigma)*exp(-pow(x-mu,2)/(2*sigma*sigma));
}
public double pdf(double d)
{
return pdf(d, mean, stndDev);
}
/**
* Computes the log probability of a given value
* @param x the value to the get log(pdf) of
* @param mu the mean of the distribution
* @param sigma the standard deviation of the distribution
* @return the log probability
*/
public static double logPdf(double x, double mu, double sigma)
{
return -0.5*log(2*PI) - log(sigma) + -pow(x-mu,2)/(2*sigma*sigma);
}
@Override
public double logPdf(double x)
{
return logPdf(x, mean, stndDev);
}
public double invPdf(double d)
{
/**
* inverse pdf of a normal distribution is
*
* 2
* (mu - x)
* ---------
* 2 ____
* 2 sigma / __
* e \/ 2 || sigma
*
*/
return exp(pow(mean-d, 2)/(2*pow(stndDev, 2)))*sqrt(2*PI)*stndDev;
}
public static double zTransform(double x, double mu, double sigma)
{
return (x-mu)/sigma;
}
public double zTransform(double x)
{
return zTransform(x, mean, stndDev);
}
private static double cdfApproxMarsaglia2004(double x)
{
if(x >= 8.22)//by 8.22 there are 16 sig figs of .999...
return 1;
else if(x <= -8.22)
return 0;
/*
* Journal of Statistical Software (July 2004, Volume 11, Issue 5), George Marsaglia
         * Algorithm to compute the cdf of the normal distribution for some z score
*/
double s = x, t = 0, b = x, q = x*x , i = 1;
//XXX double comparison
while(s != t)
s=(t=s)+(b*=q/(i+=2));
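        //0.91893853320467274178 = log(sqrt(2*PI)), making the factor below the standard normal pdf at x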
return 0.5+s*exp(-.5*q-0.91893853320467274178);
}
@Override
public String getDescriptiveName()
{
return "Normal(\u03BC=" + mean + ", \u03C3=" + stndDev + ")";
}
@Override
public double min()
{
return Double.NEGATIVE_INFINITY;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "Normal";
}
@Override
public String[] getVariables()
{
return new String[]{GreekLetters.mu, GreekLetters.sigma};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals(GreekLetters.mu))
mean = value;
else if(var.equals(GreekLetters.sigma))
setStndDev(value);
}
@Override
public ContinuousDistribution clone()
{
return new Normal(mean, stndDev);
}
@Override
public void setUsingData(Vec data)
{
mean = data.mean();
setStndDev(data.standardDeviation());
}
@Override
public double[] getCurrentVariableValues()
{
return new double[]{mean, stndDev};
}
@Override
public double mean()
{
return mean;
}
@Override
public double median()
{
return mean;
}
@Override
public double mode()
{
return mean;
}
@Override
public double variance()
{
return stndDev*stndDev;
}
@Override
public double standardDeviation()
{
return stndDev;
}
@Override
public double skewness()
{
return 0;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(mean);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(stndDev);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Normal other = (Normal) obj;
if (Double.doubleToLongBits(mean) != Double
.doubleToLongBits(other.mean)) {
return false;
}
if (Double.doubleToLongBits(stndDev) != Double
.doubleToLongBits(other.stndDev)) {
return false;
}
return true;
}
}
| 8,183 | 23.284866 | 118 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Pareto.java |
package jsat.distributions;
import jsat.linear.Vec;
import jsat.text.GreekLetters;
import static java.lang.Math.*;
/**
*
* @author Edward Raff
*/
public class Pareto extends ContinuousDistribution
{
private static final long serialVersionUID = 2055881279858330509L;
/**
* scale
*/
private double xm;
/**
* shape
*/
private double alpha;
public Pareto()
{
this(1, 3);
}
public Pareto(double xm, double alpha)
{
setXm(xm);
setAlpha(alpha);
}
public final void setAlpha(double alpha)
{
if(alpha <= 0)
throw new ArithmeticException("Shape parameter must be > 0, not " + alpha);
this.alpha = alpha;
}
public final void setXm(double xm)
{
if(xm <= 0)
throw new ArithmeticException("Scale parameter must be > 0, not " + xm);
this.xm = xm;
}
public double logPdf(double x)
{
if(x < xm )
return Double.NEGATIVE_INFINITY;
return log(alpha) + alpha*log(xm) - (alpha+1)*log(x);
}
@Override
public double pdf(double x)
{
if(x < xm )
return 0;
return exp(logPdf(x));
}
@Override
public double cdf(double x)
{
return 1 - exp( alpha * log(xm/x));
}
@Override
public double invCdf(double p)
{
return xm * pow(1-p, -1/alpha);
}
@Override
public double min()
{
return xm;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "Pareto";
}
@Override
public String[] getVariables()
{
return new String[] {"x_m", GreekLetters.alpha};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[] {xm, alpha};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals("x_m"))
setXm(value);
else if(var.equals(GreekLetters.alpha))
setAlpha(value);
}
@Override
public ContinuousDistribution clone()
{
return new Pareto(xm, alpha);
}
@Override
public void setUsingData(Vec data)
{
double mean = data.mean();
double var = data.variance();
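        //Method of moments: mean^2/var = alpha*(alpha-2), so (mean^2 + var)/var = (alpha-1)^2,
        //giving alpha = 1 + sqrt((mean^2 + var)/var) and x_m = mean*(alpha-1)/alpha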
double aP = sqrt( (mean*mean+var)/var), alphaC = aP+1;
double xmC = mean*aP /alphaC;
if(alphaC > 0 && xmC > 0)
{
setAlpha(alphaC);
setXm(xmC);
}
}
@Override
public double mean()
{
if(alpha > 1)
return alpha*xm/(alpha-1);
return Double.NaN;
}
@Override
public double mode()
{
return xm;
}
@Override
public double variance()
{
if(alpha > 2)
return xm*xm*alpha/ (pow(alpha-1, 2)*(alpha-2) );
return Double.NaN;
}
@Override
public double skewness()
{
return sqrt((alpha-2)/alpha)*(2*(1+alpha)/(alpha-3));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(alpha);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(xm);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Pareto other = (Pareto) obj;
if (Double.doubleToLongBits(alpha) != Double
.doubleToLongBits(other.alpha)) {
return false;
}
if (Double.doubleToLongBits(xm) != Double.doubleToLongBits(other.xm)) {
return false;
}
return true;
}
}
| 3,879 | 18.1133 | 87 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Rayleigh.java |
package jsat.distributions;
import jsat.linear.Vec;
import jsat.text.GreekLetters;
import static java.lang.Math.*;
/**
*
* @author Edward Raff
*/
public final class Rayleigh extends ContinuousDistribution
{
private static final long serialVersionUID = 1451949391703281531L;
/**
* scale parameter
*/
private double sig;
public Rayleigh(double sig)
{
setScale(sig);
}
public void setScale(double sig)
{
if(sig <=0 || Double.isInfinite(sig) || Double.isNaN(sig))
throw new ArithmeticException("The " + GreekLetters.sigma + " parameter must be > 0, not " + sig);
this.sig = sig;
}
public double getScale()
{
return sig;
}
@Override
public double pdf(double x)
{
if (x < 0)
return 0;
double sigSqr = sig*sig;
return x / sigSqr * exp(-x*x/(2*sigSqr));
}
@Override
public double cdf(double x)
{
        if(x < 0)
            return 0;
        double sigSqr = sig*sig;
        return 1 - exp(-x*x/(2*sigSqr));
}
@Override
public double invCdf(double p)
{
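        //Solve 1 - exp(-x^2/(2*sig^2)) = p for x: x = sig*sqrt(-2*log(1-p)) = sig*sqrt(2*log(1/(1-p)))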
return sqrt(sig*sig*log(1/(1-p)))*sqrt(2.0);
}
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "Rayleigh";
}
@Override
public String[] getVariables()
{
return new String[]{GreekLetters.sigma};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[]{sig};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals(GreekLetters.sigma))
setScale(value);
}
@Override
public ContinuousDistribution clone()
{
return new Rayleigh(sig);
}
@Override
public void setUsingData(Vec data)
{
/**
*
* ____________
* / N
* / =====
* / 1 \ 2
* sigma = / --- > x
* / 2 N / i
* / =====
* \/ i = 1
*
*/
//TODO Need to add some API to SparceVector to make this summation more efficient
double tmp = 0;
for(int i = 0; i < data.length(); i++)
tmp += pow(data.get(i), 2);
tmp /= (2*data.length());
tmp = sqrt(tmp);
setScale(tmp);
}
@Override
public double mean()
{
return sig*sqrt(PI/2);
}
@Override
public double median()
{
return sig*sqrt(log(4));
}
@Override
public double mode()
{
return sig;
}
@Override
public double variance()
{
return (4-PI)/2*sig*sig;
}
@Override
public double skewness()
{
return 2*sqrt(PI)*(PI-3)/(pow(4-PI, 3.0/2.0));
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(sig);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Rayleigh other = (Rayleigh) obj;
if (Double.doubleToLongBits(sig) != Double.doubleToLongBits(other.sig)) {
return false;
}
return true;
}
}
| 3,641 | 17.48731 | 110 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/StudentT.java |
package jsat.distributions;
import jsat.linear.Vec;
import static java.lang.Math.*;
import java.util.Random;
import static jsat.math.SpecialMath.*;
import jsat.text.GreekLetters;
/**
*
* @author Edward Raff
*/
public class StudentT extends ContinuousDistribution
{
private static final long serialVersionUID = -3157525461647767831L;
double df;
double mu;
double sig;
public StudentT(double df)
{
this(df, 0, 1);
}
public StudentT(double df, double mu, double sig)
{
this.df = df;
this.mu = mu;
this.sig = sig;
}
/**
* Sets the degrees of freedom used by the test.
*
* @param df the new value for the degrees of freedom.
*/
public void setDf(double df)
{
this.df = df;
}
@Override
public double pdf(double t)
{
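        //log pdf: lnGamma((df+1)/2) - lnGamma(df/2) - log(df*PI)/2 - log(sig) - (df+1)/2 * log(1 + ((t-mu)/sig)^2/df)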
        double leftSide = lnGamma((df+1)/2) - lnGamma(df/2) - log(df*PI)/2 - log(sig);
double rightSide = -(df+1)/2*log(1+pow((t-mu)/sig, 2)/df);
return exp(leftSide+rightSide);
}
@Override
public double cdf(double t)
{
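        //One-tail probability via the regularized incomplete beta function:
        //with z = (t-mu)/sig and x = df/(df + z^2), P(T > |t|) = I_x(df/2, 1/2)/2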
double x = df/(df + pow((t-mu)/sig, 2));
double p = betaIncReg(x, df/2, 0.5)/2;
if( t > mu)
return 1 - p;
else
return p;
}
@Override
public double invCdf(double p)
{
if(p < 0 || p > 1)
throw new ArithmeticException("Probability must be in the range [0,1], not " + p);
double x = invBetaIncReg(2*Math.min(p,1-p), df/2, 0.5);
x = sig*sqrt(df*(1-x)/x);
if(p >= 0.5)
return mu+x;
else
return mu-x;
}
@Override
public double min()
{
return Double.NEGATIVE_INFINITY;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDescriptiveName()
{
return "Student-T(df=" + df +", \u03BC=" + mu + ", \u03C3=" + sig + ")";
}
@Override
public String getDistributionName()
{
return "Student-T";
}
@Override
public String[] getVariables()
{
return new String[]{"df", GreekLetters.mu, GreekLetters.sigma};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[]{df, mu, sig};
}
@Override
public void setVariable(String var, double value)
{
if (var.equals("df"))
if (value > 0)
df = value;
else
throw new ArithmeticException("Degrees of Fredom must be greater than 0");
else if (var.equals(GreekLetters.mu))
mu = value;
else if (var.equals(GreekLetters.sigma))
if (value > 0)
sig = value;
else
throw new ArithmeticException("Standard deviation must be greater than zero");
}
@Override
public ContinuousDistribution clone()
{
return new StudentT(df, mu, sig);
}
@Override
public void setUsingData(Vec data)
{
/*
* While not true in every use of the t-distribution,
         * we assume degrees of freedom is n-1 if n is the number of samples
*
*/
df = data.length()-1;
mu = data.mean();
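        //Var(X) = sig^2 * df/(df-2) for df > 2, so solve for sig from the sample variance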
        sig = sqrt(data.variance()*(df-2)/df);
}
@Override
public double mean()
{
return mu;
}
@Override
public double median()
{
return mu;
}
@Override
public double mode()
{
return mu;
}
@Override
public double variance()
{
if(df<=1)
return Double.NaN;
else if(1 < df && df <= 2)
return Double.POSITIVE_INFINITY;
return df/(df-2)*sig*sig;
}
@Override
public double skewness()
{
if(df <= 3)//Undefined for df <= 3
return Double.NaN;
return 0;
}
@Override
public double[] sample(int numSamples, Random rand)
{
if(mu != 0 || sig != 1)
return super.sample(numSamples, rand);
double[] sample = new ChiSquared(df).sample(numSamples, rand);
for(int i = 0; i < sample.length; i++)
sample[i] = rand.nextGaussian()/sqrt(sample[i]/df);
return sample;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(df);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(mu);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(sig);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
StudentT other = (StudentT) obj;
if (Double.doubleToLongBits(df) != Double.doubleToLongBits(other.df)) {
return false;
}
if (Double.doubleToLongBits(mu) != Double.doubleToLongBits(other.mu)) {
return false;
}
if (Double.doubleToLongBits(sig) != Double.doubleToLongBits(other.sig)) {
return false;
}
return true;
}
}
| 5,319 | 20.714286 | 94 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/TruncatedDistribution.java | /*
* Copyright (C) 2015 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions;
import jsat.linear.Vec;
/**
 * This distribution truncates a given continuous distribution to only be valid for
* values in the range (min, max]. The {@link #pdf(double) pdf} for any value
* outside that range will be 0.<br>
* <br>
* The {@link #pdf(double) }, {@link #cdf(double) }, and the {@link #invCdf(double)
* } methods are implemented efficiently, with little overhead per call. All
* other methods are approximated numerically, and incur more overhead.
*
* @author Edward Raff <[email protected]>
*/
public class TruncatedDistribution extends ContinuousDistribution
{
private ContinuousDistribution base;
private double min;
private double max;
/**
* The probability of a given value coming from the specified range of the
* original distribution
*/
private double probInOrigRange;
/**
     * the CDF(min) of the base distribution
*/
private double old_min_p;
/**
     * The CDF(max) of the base distribution
*/
private double old_max_p;
public TruncatedDistribution(ContinuousDistribution base, double min, double max)
{
this.base = base;
this.min = min;
this.max = max;
computeNeeded();
}
@Override
public double pdf(double x)
{
if(x <= min || x > max)
return 0;
return base.pdf(x)/probInOrigRange;
}
@Override
public double cdf(double x)
{
if(x <= min)
return 0;
else if (x >= max)
return 1;
else
return (base.cdf(x)-old_min_p)/probInOrigRange;
}
@Override
public double invCdf(double p)
{
double old_min_p = base.cdf(min);
double old_max_p = base.cdf(max);
//rescale p to the range of p values that are acceptable to the base distribution now
double newP = (old_max_p-old_min_p)*p+old_min_p;
return base.invCdf(newP);
}
private void computeNeeded()
{
old_min_p = base.cdf(min);
old_max_p = base.cdf(max);
probInOrigRange = old_max_p-old_min_p;
}
@Override
public String getDistributionName()
{
return "(" + min + ", " + max + "] Truncated " + base.getDescriptiveName();
}
//TODO should min/max be set by this too?
@Override
public String[] getVariables()
{
return base.getVariables();
}
@Override
public double[] getCurrentVariableValues()
{
return base.getCurrentVariableValues();
}
@Override
public void setVariable(String var, double value)
{
base.setVariable(var, value);
computeNeeded();
}
@Override
public TruncatedDistribution clone()
{
return new TruncatedDistribution(base.clone(), min, max);
}
@Override
public void setUsingData(Vec data)
{
base.setUsingData(data);
computeNeeded();
}
@Override
public double mode()
{
double baseMode = base.mode();
if(baseMode <= max && baseMode > min)
return baseMode;
return super.mode();
}
@Override
public double min()
{
return Math.max(Math.nextUp(min), base.min());
}
@Override
public double max()
{
return Math.min(max, base.max());
}
}
| 4,098 | 24.302469 | 93 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Uniform.java |
package jsat.distributions;
import jsat.linear.Vec;
/**
*
* @author Edward Raff
*/
public class Uniform extends ContinuousDistribution
{
private static final long serialVersionUID = 2479606544724378610L;
private double a, b;
public Uniform(double a, double b)
{
double min = Math.min(a, b);
double max = Math.max(a, b);
this.a = min;
this.b = max;
}
@Override
public double pdf(double x)
{
if(a == b && a == x)
return 0;
else if(a <= x && x <= b)
return 1/(b-a);
else
return 0;
}
@Override
public double cdf(double x)
{
if(a > x)
return 0;
else if( x >= b)
return 1;
else if(a == b && a == x)
return 1;
else
return (x-a)/(b-a);
}
@Override
public double invCdf(double p)
{
if( p < 0 || p > 1)
throw new ArithmeticException("Probability must be interface the range [0,1], not " + p);
if(a == b && p == 1)
return a;
return a + p*(b-a);
}
@Override
public double min()
{
        return a;
}
@Override
public double max()
{
        return b;
}
@Override
public String getDistributionName()
{
return "Uniform";
}
@Override
public String[] getVariables()
{
return new String[] {"a", "b"};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[] {a, b};
}
@Override
public void setVariable(String var, double value)
{
if(var.equals("a"))
a = value;
else if(var.equals("b"))
b = value;
double min = Math.min(a, b);
double max = Math.max(a, b);
a = min;
b = max;
}
@Override
public ContinuousDistribution clone()
{
return new Uniform(a, b);
}
@Override
public void setUsingData(Vec data)
{
a = data.min();
b = data.max();
}
@Override
public double mean()
{
return (a+b)*0.5;
}
@Override
public double median()
{
return mean();
}
@Override
public double mode()
{
        return mean();//Any value in [a,b] can actually be the mode
}
@Override
public double variance()
{
return Math.pow(b-a, 2)/12.0;
}
@Override
public double skewness()
{
return 0;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(a);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(b);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Uniform other = (Uniform) obj;
if (Double.doubleToLongBits(a) != Double.doubleToLongBits(other.a)) {
return false;
}
if (Double.doubleToLongBits(b) != Double.doubleToLongBits(other.b)) {
return false;
}
return true;
}
}
| 3,360 | 17.467033 | 101 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/Weibull.java |
package jsat.distributions;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.math.SimpleLinearRegression;
import jsat.text.GreekLetters;
import static java.lang.Math.*;
import static jsat.math.SpecialMath.*;
/**
*
* @author Edward Raff
*/
public class Weibull extends ContinuousDistribution
{
private static final long serialVersionUID = -4083186674624535562L;
/**
* Shape parameter
*/
double alpha;
/**
* Scale parameter
*/
double beta;
private double logAlpha, logBeta;
public Weibull(double alpha, double beta)
{
setAlpha(alpha);
setBeta(beta);
}
public double reliability(double x)
{
return exp(-pow(x/alpha, beta));
}
public double failureRate(double x)
{
return beta/alpha * pow(x/alpha, beta-1);
}
@Override
public double logPdf(double x)
{
if(x <= 0)
return -Double.MAX_VALUE;
return logAlpha-logBeta+(alpha-1)*log(x/beta) -pow(x/beta, alpha) ;
}
@Override
public double pdf(double x)
{
if(x < 0)
return 0;
return alpha/beta * pow(x/beta, alpha-1)*exp(-pow(x/beta, alpha));
}
@Override
public double cdf(double x)
{
return 1 - exp(-pow(x/beta, alpha));
}
@Override
public double invCdf(double p)
{
return beta*pow(-log(1-p),1/alpha);
}
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String getDistributionName()
{
return "Weibull";
}
@Override
public String[] getVariables()
{
return new String[]{GreekLetters.alpha, GreekLetters.beta};
}
@Override
public double[] getCurrentVariableValues()
{
return new double[]{alpha, beta};
}
@Override
public void setVariable(String var, double value)
{
if (var.equals("alpha") || var.equals(GreekLetters.alpha))
setAlpha(value);
else if (var.equals("beta") || var.equals(GreekLetters.beta))
setBeta(value);
}
final public void setAlpha(double alpha)
{
if(alpha > 0)
{
this.alpha = alpha;
logAlpha = log(alpha);
}
else
throw new ArithmeticException("alpha must be > 0 not " + alpha);
}
final public void setBeta(double beta)
{
if(beta > 0)
{
this.beta = beta;
logBeta = log(beta);
}
else
throw new ArithmeticException("beta must be > 0 not " + beta);
}
@Override
public ContinuousDistribution clone()
{
return new Weibull(alpha, beta);
}
@Override
public void setUsingData(Vec data)
{
        /* Method of parameter estimation is more complex than for
         * other distributions, see
         * http://www.qualitydigest.com/jan99/html/body_weibull.html
         * for the method used. NOTE the above article has alpha and beta in opposite order
*/
Vec sData = data.sortedCopy();
DenseVector ranks = new DenseVector(sData.length());
for(int i = 0; i < sData.length(); i++)
{
//Get the median rank
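            //Bernard's approximation of the median rank: (i - 0.3)/(n + 0.4),
            //transformed to log(log(1/(1-F))) so Weibull samples fall on a straight line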
double tmp = (i+1.0-0.3)/(sData.length()+0.4);
tmp = 1/(1-tmp);
tmp = log(log(tmp));
ranks.set(i, tmp);
sData.set(i, log(sData.get(i)));
}
double[] s = SimpleLinearRegression.regres(sData, ranks);
        //The shape parameter is approximately the slope
setAlpha(s[1]);
/*
         * We can now compute beta directly from the intercept.
         * Note the page uses y = m x + b, instead of y = b x + a
*
*/
setBeta(exp(-s[0]/alpha));
}
@Override
public double mean()
{
return beta * gamma(1+1/alpha);
}
@Override
public double median()
{
return pow(log(2), 1/alpha)*beta;
}
@Override
public double mode()
{
if(alpha <= 1)
throw new ArithmeticException("Mode only exists for k > 1");
return beta * pow( (alpha-1)/alpha, 1/alpha);
}
@Override
public double variance()
{
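        //Var = beta^2*Gamma(1+2/alpha) - (beta*Gamma(1+1/alpha))^2, and the subtracted term is mean()^2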
        return beta*beta * gamma(1+2/alpha) - pow(mean(),2);
}
@Override
public double skewness()
{
double mu = mean();
double stnDev = standardDeviation();
return (gamma(1 + 3/alpha)*pow(beta, 3)-3*mu*pow(stnDev, 2)-pow(mu, 3))/pow(stnDev, 3);
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
long temp;
temp = Double.doubleToLongBits(alpha);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(beta);
result = prime * result + (int) (temp ^ (temp >>> 32));
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Weibull other = (Weibull) obj;
if (Double.doubleToLongBits(alpha) != Double
.doubleToLongBits(other.alpha)) {
return false;
}
if (Double.doubleToLongBits(beta) != Double
.doubleToLongBits(other.beta)) {
return false;
}
return true;
}
}
| 5,560 | 20.306513 | 95 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/discrete/Binomial.java | /*
* Copyright (C) 2015 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions.discrete;
import static jsat.math.SpecialMath.*;
import static java.lang.Math.*;
/**
 * The Binomial distribution is the distribution of the number of successes in a
 * fixed number of independent trials, each with the same probability of success.
*
* @author Edward Raff
*/
public class Binomial extends DiscreteDistribution
{
private int trials;
private double p;
/**
* Creates a new Binomial distribution for 1 trial with a 0.5 probability of
* success
*/
public Binomial()
{
this(1, 0.5);
}
/**
* Creates a new Binomial distribution
*
* @param trials the number of independent trials
* @param p the probability of success
*/
public Binomial(int trials, double p)
{
setTrials(trials);
setP(p);
}
/**
* The number of trials for the distribution
* @param trials the number of trials to perform
*/
public void setTrials(int trials)
{
if(trials < 1)
throw new IllegalArgumentException("number of trials must be positive, not " + trials);
this.trials = trials;
}
public int getTrials()
{
return trials;
}
/**
* Sets the probability of a trial being a success
* @param p the probability of success for each trial
*/
public void setP(double p)
{
if(Double.isNaN(p) || p < 0 || p > 1)
throw new IllegalArgumentException("probability of success must be in [0, 1], not " + p);
this.p = p;
}
public double getP()
{
return p;
}
@Override
public double logPmf(int x)
{
if(x > trials || x < 0)
return -Double.MAX_VALUE;
//re write as: log((Gamma(n+1) p^x (1-p)^(n-x))/(Gamma(x+1) Gamma(n-x+1)))
//then expand to: n log(1-p)-log(Gamma(n-x+1))+log(Gamma(n+1))-x log(1-p)+x log(p)-log(Gamma(x+1))
final int n = trials;
return n*log(1-p) - lnGamma(n-x+1) + lnGamma(n+1) - x*log(1-p)+ x * log(p) - lnGamma(x+1);
}
@Override
public double pmf(int x)
{
if(x > trials || x < 0)
return 0;
return exp(logPmf(x));
}
@Override
public double cdf(int x)
{
if(x >= trials)
return 1;
if(x < 0)
return 0;
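        //P(X <= x) = I_{1-p}(trials - x, x + 1), the regularized incomplete beta function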
return betaIncReg(1-p, trials-x, 1+x);
}
@Override
public double mean()
{
return trials*p;
}
@Override
public double median()
{
if(Math.abs(p-0.5) < 1e-3)//special case p = 1/2, trials/2 is the unique median for trials % 2 == 1, and is a valid median if trials % 2 == 0
return trials/2;
if(p <= 1 - Math.log(2) || p >= Math.log(2))
return Math.round(trials*p);//exact unique median
return invCdf(0.5);
}
@Override
public double mode()
{
if(p == 1)
return trials;
else
return Math.floor((trials+1)*p);
}
@Override
public double variance()
{
return trials*p*(1-p);
}
@Override
public double skewness()
{
return (1-2*p)/standardDeviation();
}
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return trials;
}
@Override
public Binomial clone()
{
return new Binomial(trials, p);
}
}
| 4,132 | 22.482955 | 149 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/discrete/DiscreteDistribution.java | /*
* Copyright (C) 2015 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions.discrete;
import jsat.distributions.Distribution;
import jsat.math.Function1D;
import jsat.math.rootfinding.Zeroin;
/**
* This abstract class defines the contract for a distribution over the integer
* values.<br>
* <br>
* The {@link #cdf(double) } method will behave by
* {@link Math#floor(double) rounding down} and then calling the integer {@link #cdf(int)
* } counterpart.
*
* @author Edward Raff
*/
abstract public class DiscreteDistribution extends Distribution
{
/**
     * Computes the log of the Probability Mass Function. Note that when the
* probability is zero, {@link Double#NEGATIVE_INFINITY} would be the true
* value. Instead, this method will always return the negative of
* {@link Double#MAX_VALUE}. This is to avoid propagating bad values through
* computation.
*
* @param x the value to get the log(PMF) of
* @return the value of log(PMF(x))
*/
public double logPmf(int x)
{
double pmf = pmf(x);
if (pmf <= 0)
return -Double.MAX_VALUE;
return Math.log(pmf);
}
abstract public double pmf(int x);
/**
* Computes the value of the Cumulative Density Function (CDF) at the given point.
* The CDF returns a value in the range [0, 1], indicating what portion of values
* occur at or below that point.
*
* @param x the value to get the CDF of
* @return the CDF(x)
*/
abstract public double cdf(int x);
@Override
public double cdf(double x)
{
return cdf((int)Math.floor(x));
}
@Override
public double invCdf(double p)
{
return invCdfRootFinding(p, 1e-6);
}
/**
* Helper method that computes the inverse CDF by performing root-finding on
     * the CDF of the function. This provides a convenient default for any
     * {@link #invCdf(double) } implementation, but may not be as fast or accurate
* as possible.
*
* @param p the probability value
* @param tol the search tolerance
* @return the value such that the CDF would return p
*/
protected double invCdfRootFinding(double p, double tol)
{
if (p < 0 || p > 1)
throw new ArithmeticException("Value of p must be in the range [0,1], not " + p);
//two special case checks, as they can cause a failure to get a positive and negative value on the ends, which means we can't do a search for the root
//Special case check, p < min value
if(min() >= Integer.MIN_VALUE)
if(p <= cdf(min()))
return min();
//special case check, p >= max value
if(max() < Integer.MAX_VALUE)
if(p > cdf(max()-1))
return max();
        //The stepwise nature of a discrete CDF can cause problems for the root search, so we pass in a smoothed cdf
        //Lets use an interpolated version of the CDF so that our numerical methods will behave better
Function1D cdfInterpolated = (double x) ->
{
double query = x;
//if it happens to fall on an int we just compute the regular value
if(Math.rint(query) == query)
return cdf((int)query) - p;
//else, interpolate
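            //linearly weight the cdf at the floor and ceiling of the query; cdf(double) rounds down, so cdf(query) is the floor value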
            double larger = Math.ceil(query);
double diff = larger-query;
return cdf(query)*diff + cdf(larger)*(1-diff) - p;
};
double a = Double.isInfinite(min()) ? Integer.MIN_VALUE*.95 : min();
double b = Double.isInfinite(max()) ? Integer.MAX_VALUE*.95 : max();
double toRet = Zeroin.root(tol, a, b, cdfInterpolated);
return Math.round(toRet);
}
@Override
abstract public DiscreteDistribution clone();
}
| 4,522 | 33.265152 | 158 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/discrete/Poisson.java | /*
* Copyright (C) 2015 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions.discrete;
import static jsat.math.SpecialMath.*;
import static java.lang.Math.*;
import java.util.Random;
import jsat.linear.DenseVector;
import jsat.math.SpecialMath;
/**
* The Poisson distribution is for the number of events occurring in a fixed
* amount of time, where the event has an average rate and all other occurrences
* are independent.
*
* @author Edward Raff
*/
public class Poisson extends DiscreteDistribution
{
private double lambda;
/**
* Creates a new Poisson distribution with λ = 1
*/
public Poisson()
{
this(1);
}
/**
* Creates a new Poisson distribution
* @param lambda the average rate of the event
*/
public Poisson(double lambda)
{
setLambda(lambda);
}
/**
* Sets the average rate of the event occurring in a unit of time
*
* @param lambda the average rate of the event occurring
*/
public void setLambda(double lambda)
{
if (Double.isNaN(lambda) || lambda <= 0 || Double.isInfinite(lambda))
throw new IllegalArgumentException("lambda must be positive, not " + lambda);
this.lambda = lambda;
}
/**
*
* @return the average rate of the event occurring in a unit of time
*/
public double getLambda()
{
return lambda;
}
@Override
public double logPmf(int x)
{
if(x < 0)
return -Double.MAX_VALUE;
//log(e^-lambda lambda^x / x!)
//log(x!) = log(Gamma(x+1))
return -lnGamma(x+1) - lambda + x * log(lambda);
}
@Override
public double pmf(int x)
{
if(x < 0)
return 0;
return Math.exp(logPmf(x));
}
@Override
public double cdf(int x)
{
if(x < 0)
return 0;
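        //P(X <= x) = Q(x + 1, lambda), the regularized upper incomplete gamma function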
return gammaQ(x+1, lambda);
}
private double sampleOne(Random rand)
{
//From http://www.johndcook.com/blog/2010/06/14/generating-poisson-random-values/
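        //Rejection sampler using a logistic proposal (the inverse-CDF sample x below);
        //intended for large lambda, and only used by sample() when lambda >= 60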
double c = 0.767 - 3.36/lambda;
double beta = PI/sqrt(3.0*lambda);
double alpha = beta*lambda;
double k = log(c) - lambda - log(beta);
while(true)
{
double u = rand.nextDouble();
double x = (alpha - log((1.0 - u) / u)) / beta;
double n = floor(x + 0.5);
if (n < 0)
continue;
double v = rand.nextDouble();
double y = alpha - beta * x;
// double lhs = y + log(v/(1.0 + exp(y))^2);
//simplify right part as log(v)-2 log(e^y+1)
// double lhs = y + log(v/pow(1.0 + exp(y), 2));
double lhs = y + log(v) - 2 * log(exp(y) + 1);
// double rhs = k + n*log(lambda) - log(n!);
double rhs = k + n * log(lambda) - SpecialMath.lnGamma(n + 1);
if (lhs <= rhs)
return n;
}
}
@Override
public double[] sample(int numSamples, Random rand)
{
double[] samples = new double[numSamples];
if(lambda < 60)
{
//https://en.wikipedia.org/wiki/Poisson_distribution
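            //Inversion by sequential search: accumulate pmf terms p(x+1) = p(x)*lambda/(x+1) until the running sum s exceeds u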
double p_init = exp(-lambda);
for(int i = 0; i < numSamples; i++)
{
double u = rand.nextDouble();
double x = 0;
double p = p_init;
double s = p;
while(u > s)
{
x++;
p *= lambda/x;
s += p;
}
samples[i] = x;
}
}
else
{
for(int i = 0; i < numSamples; i++)
samples[i] = sampleOne(rand);
}
return samples;
}
@Override
public double mean()
{
return lambda;
}
@Override
public double mode()
{
//see https://math.stackexchange.com/questions/246496/the-mode-of-the-poisson-distribution/246507#246507
if(lambda < 1)
return 0;
else if(lambda > 1 && Math.rint(lambda) != lambda)
return Math.floor(lambda);
else//lambda is an integer
            return lambda;//lambda-1 is also valid
}
@Override
public double variance()
{
return lambda;
}
@Override
public double skewness()
{
return 1/standardDeviation();
}
@Override
public double min()
{
return 0;
}
@Override
public double max()
{
return Double.POSITIVE_INFINITY;
}
@Override
public Poisson clone()
{
return new Poisson(lambda);
}
}
| 5,402 | 24.130233 | 112 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/discrete/UniformDiscrete.java | /*
* Copyright (C) 2015 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions.discrete;
/**
* The discrete uniform distribution.
*
* @author Edward Raff
*/
public class UniformDiscrete extends DiscreteDistribution
{
private int min;
private int max;
/**
* Creates a new Uniform distribution with a min of 0 and a max of 10
*/
public UniformDiscrete()
{
this(0, 10);
}
/**
* Creates a new discrete uniform distribution
* @param min the minimum value to occur
* @param max the maximum value to occur
*/
public UniformDiscrete(int min, int max)
{
setMinMax(min, max);
}
/**
* Sets the minimum and maximum values at the same time, this is useful if
* setting them one at a time may have caused a conflict with the previous
* values
*
* @param min the new minimum value to occur
* @param max the new maximum value to occur
*/
public void setMinMax(int min, int max)
{
if(min >= max)
throw new IllegalArgumentException("The input minimum (" + min + ") must be less than the given max (" + max + ")");
this.min = min;
this.max = max;
}
/**
* Sets the minimum value to occur from the distribution, must be less than
* {@link #getMax() }.
*
* @param min the minimum value to occur
*/
public void setMin(int min)
{
if (min >= max)
throw new IllegalArgumentException(min + " must be less than the max value " + max);
this.min = min;
}
public int getMin()
{
return min;
}
/**
* Sets the maximum value to occur from the distribution, must be greater
* than {@link #getMin() }.
* @param max the maximum value to occur
*/
public void setMax(int max)
{
if(max <= min)
throw new IllegalArgumentException(max + " must be greater than the min value " + min);
this.max = max;
}
public int getMax()
{
return max;
}
@Override
public double pmf(int x)
{
if(x < min || x > max)
return 0;
else
return 1.0/(1+max-min);
}
@Override
public double cdf(int x)
{
if(x >= max)
return 1;
else if(x < min)
return 0;
else
return (1-min+x)/(double)(1+max-min);
}
@Override
public double invCdf(double p)
{
if(p <= 0)
return min;
else if(p >= 1)
return max;
else
return Math.max(1, Math.ceil((1+max-min)*p)+min-1);
}
@Override
public double mean()
{
return max/2.0+min/2.0;
}
@Override
public double median()
{
return Math.floor(mean());
}
@Override
public double mode()
{
return Double.NaN;
}
@Override
public double variance()
{
long dif = (max-min+1);
dif *= dif;
return (dif-1)/12.0;
}
@Override
public double skewness()
{
return 0;
}
@Override
public double min()
{
return min;
}
@Override
public double max()
{
return max;
}
@Override
public DiscreteDistribution clone()
{
return new UniformDiscrete(min, max);
}
}
| 4,020 | 21.338889 | 129 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/discrete/Zipf.java | /*
* Copyright (C) 2018 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions.discrete;
import static jsat.math.SpecialMath.*;
import static java.lang.Math.*;
import jsat.math.Function1D;
import jsat.math.rootfinding.Bisection;
/**
* This class provides an implementation of the Zipf distribution, a power-law
* type distribution for discrete values.
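 * <br><br>
 * A minimal usage sketch: {@code new Zipf(1000, 1.0).pmf(1)} gives the
 * probability of the rank-1 (most frequent) item out of 1000, with skew 1.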
*
* @author Edward Raff <[email protected]>
*/
public class Zipf extends DiscreteDistribution
{
private double cardinality;
private double skew;
/**
* Both variants of the Zipf distribution re-use a term of Zeta(1+skew) or Harmonic(cardinality, 1+skew) for most calculations. This value caches this commonly used constant
*/
private double denomCache;
/**
* Creates a new Zipf distribution
* @param cardinality the number of possible selections (or {@link Double#POSITIVE_INFINITY})
* @param skew the skewness of the distribution (must be positive value)
*/
public Zipf(double cardinality, double skew)
{
setCardinality(cardinality);
setSkew(skew);
}
/**
* Creates a new Zipf distribution for a set of infinite cardinality
* @param skew the skewness of the distribution (must be positive value)
*/
public Zipf(double skew)
{
this(Double.POSITIVE_INFINITY, skew);
}
/**
* Creates a new Zipf distribution of infinite cardinality and
* {@link #setSkew(double) skewness} of 1.
*/
public Zipf()
{
this(1.0);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public Zipf(Zipf toCopy)
{
this.cardinality = toCopy.cardinality;
this.skew = toCopy.skew;
this.denomCache = toCopy.denomCache;
}
/**
* Sets the cardinality of the distribution, defining the maximum number of
* items that Zipf can return.
*
* @param cardinality the maximum output range of the distribution, can be
* {@link Double#POSITIVE_INFINITY infinite}.
*/
public void setCardinality(double cardinality)
{
if (cardinality < 0 || Double.isNaN(cardinality))
throw new IllegalArgumentException("Cardinality must be a positive integer or infinity, not " + cardinality);
this.cardinality = Math.ceil(cardinality);
fixCache();
}
/**
*
* @return the cardinality (maximum value) of the distribution
*/
public double getCardinality()
{
return cardinality;
}
/**
* Sets the skewness of the distribution. Lower values spread out the
* probability distribution, while higher values concentrate on the lowest
* ranks.
*
* @param skew the positive value for the distribution's skew
*/
public void setSkew(double skew)
{
if(skew <= 0 || Double.isNaN(skew) || Double.isInfinite(skew))
throw new IllegalArgumentException("Skew must be a positive value, not " + skew);
this.skew = skew;
fixCache();
}
private void fixCache()
{
if (Double.isInfinite(cardinality))
denomCache = zeta(1 + skew);
else
denomCache = harmonic(cardinality, 1 + skew);
}
/**
*
* @return the skewness of the distribution
*/
public double getSkew()
{
return skew;
}
@Override
public double pmf(int x)
{
if(x < 1)
return 0;
if(Double.isInfinite(cardinality))
{
//x^(-1 - skew)/Zeta[1 + skew]
return pow(x, -skew-1)/denomCache;
}
else
{
if(x > cardinality)
return 0;
//x^(-1-skew)/HarmonicNumber[cardinality,1+skew]
return pow(x, -skew-1)/denomCache;
}
}
@Override
public double cdf(int x)
{
if (x < 1)
return 0;
if (x >= cardinality)
return 1;
//Both inf and finite case have same numerator. We've cached the denominator. So just return it.
//HarmonicNumber[x,1+skew]/Zeta[1+skew]
//HarmonicNumber[x,1+skew]/HarmonicNumber[cardinality,1+skew]
return harmonic(x, 1+skew)/denomCache;
}
@Override
public double invCdf(double p)
{
return invCdfRootFinding(p, Math.max(1/cardinality, 1e-14));
}
@Override
protected double invCdfRootFinding(double p, double tol)
{
if (p < 0 || p > 1)
throw new ArithmeticException("Value of p must be in the range [0,1], not " + p);
//two special case checks, as they can cause a failure to get a positive and negative value on the ends, which means we can't do a search for the root
//Special case check, p < min value
if(min() >= Integer.MIN_VALUE)
if(p <= cdf(min()))
return min();
//special case check, p >= max value
if(max() < Integer.MAX_VALUE)
if(p > cdf(max()-1))
return max();
        //the stepwise nature of discrete distributions can cause problems for the search, so we will use a smoothed cdf to pass in
        //Skip the default interpolation and compute the smoothed CDF directly to halve the cost
double cnst = p*denomCache;
Function1D cdfInterpolated = (double x) ->
{
            //smoothed variant of the CDF; the numerator is the same for the infinite
            //and finite cardinality cases, since the denominator is already folded into cnst
            return harmonic(x, 1 + skew) - cnst;
};
double a = min();
double b = Double.isInfinite(max()) ? Integer.MAX_VALUE*.95 : max();
//Normally would use Zero-in, but Zipf has bad behavior and it just degrades to bisection + overhead
double toRet = Bisection.root(tol, a, b, cdfInterpolated);
// System.out.println(toRet + " vs " + a);
return Math.min(Math.round(toRet), cardinality);
}
@Override
public Zipf clone()
{
return new Zipf(this);
}
@Override
public double mean()
{
if(Double.isInfinite(cardinality))
{
if(skew <= 1)
return Double.POSITIVE_INFINITY;
//Zeta[skew]/Zeta[1+skew]
return zeta(skew)/denomCache;
}
else
{
//HarmonicNumber[cardinality, skew]/HarmonicNumber[cardinality, 1 + skew]
return harmonic(cardinality, skew)/denomCache;
}
}
@Override
public double mode()
{
return 1;
}
@Override
public double variance()
{
if(Double.isInfinite(cardinality))
{
if(skew <= 2)
return Double.POSITIVE_INFINITY;
//-(Zeta[skew]^2/Zeta[1+skew]^2)+Zeta[-1+skew]/Zeta[1+skew]
double zSkewP1 = denomCache;
double zSkewM1 = zeta(skew-1);
return zSkewM1/zSkewP1 - pow(zeta(skew), 2)/(zSkewP1*zSkewP1);
}
else
{
//(-HarmonicNumber[cardinality,skew]^2+HarmonicNumber[cardinality,-1+skew] HarmonicNumber[cardinality,1+skew])/HarmonicNumber[cardinality,1+skew]^2
double hSkewP1 = harmonic(cardinality, 1+skew);
return (-pow(harmonic(cardinality, skew), 2)+harmonic(cardinality, skew-1) * hSkewP1)/(hSkewP1*hSkewP1);
}
}
@Override
public double skewness()
{
if(Double.isInfinite(cardinality))
{
if(skew <= 3)
return Double.POSITIVE_INFINITY;
//(2 Zeta[skew]^3-3 Zeta[-1+skew] Zeta[skew] Zeta[1+skew]+Zeta[-2+skew] Zeta[1+skew]^2)/(-Zeta[skew]^2+Zeta[-1+skew] Zeta[1+skew])^(3/2)
double zSkew = zeta(skew);
double zSkewP1 = denomCache;
double zSkewM1 = zeta(skew - 1);
return (2 * pow(zSkew, 3) - 3 * zSkewM1 * zSkew * zSkewP1 + zeta(-2 + skew) * pow(zSkewP1, 2)) / pow(-pow(zSkew, 2) + zSkewM1 * zSkewP1, 3.0 / 2.0);
}
else
{
//(2 HarmonicNumber[cardinality,skew]^3-3 HarmonicNumber[cardinality,-1+skew] HarmonicNumber[cardinality,skew] HarmonicNumber[cardinality,1+skew]+HarmonicNumber[cardinality,-2+skew] HarmonicNumber[cardinality,1+skew]^2)/(HarmonicNumber[cardinality,1+skew]^3 ((-HarmonicNumber[cardinality,skew]^2+HarmonicNumber[cardinality,-1+skew] HarmonicNumber[cardinality,1+skew])/HarmonicNumber[cardinality,1+skew]^2)^(3/2))
double hSkewM1 = harmonic(cardinality, skew-1);
double hSkew = harmonic(cardinality, skew);
double hSkewP1 = denomCache;
//numerator is (2 HarmonicNumber[cardinality,skew]^3-3 HarmonicNumber[cardinality,-1+skew] HarmonicNumber[cardinality,skew] HarmonicNumber[cardinality,1+skew]+HarmonicNumber[cardinality,-2+skew] HarmonicNumber[cardinality,1+skew]^2)
double numer = (2*pow(hSkew, 3)-3*hSkewM1*hSkew*hSkewP1+harmonic(cardinality, skew-2)*pow(hSkewP1, 2));
//denominator is (HarmonicNumber[cardinality,1+skew]^3 ((-HarmonicNumber[cardinality,skew]^2+HarmonicNumber[cardinality,-1+skew] HarmonicNumber[cardinality,1+skew])/HarmonicNumber[cardinality,1+skew]^2)^(3/2))
double denom = pow(hSkewP1, 3) * pow( (-pow(hSkew, 2) + hSkewM1 * hSkewP1)/pow(hSkewP1, 2) , 3.0/2.0 );
return numer/denom;
}
}
@Override
public double min()
{
return 1;
}
@Override
public double max()
{
return cardinality;
}
}
| 10,232 | 32.441176 | 424 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/empirical/KernelDensityEstimator.java |
package jsat.distributions.empirical;
import java.util.*;
import jsat.distributions.ContinuousDistribution;
import jsat.distributions.empirical.kernelfunc.*;
import jsat.linear.Vec;
import jsat.math.OnLineStatistics;
import jsat.utils.ProbailityMatch;
/**
* Kernel Density Estimator, KDE, uses the data set itself to approximate the underlying probability
* distribution using {@link KernelFunction Kernel Functions}.
*
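 * A minimal usage sketch (the sample values below are arbitrary, made up for illustration):
 * <pre>{@code
 * Vec samples = new DenseVector(new double[]{1.0, 1.5, 2.0, 2.2, 3.7});
 * KernelDensityEstimator kde = new KernelDensityEstimator(samples);
 * double density = kde.pdf(2.1);
 * double cumulative = kde.cdf(2.1);
 * }</pre>
 *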
* @author Edward Raff
*/
public class KernelDensityEstimator extends ContinuousDistribution
{
/*
* README
* Implementation note:
* The values are stored in sorted order, which allows for fast evaluations.
* Instead of doing the full loop on each function call, O(n) time,
     * we know the bounds on the values that will affect the result, so we
     * can do 2 binary searches and then a loop. Though this is still
     * technically O(n), it's more accurately described as O(n * epsilon + log(n)), where n * epsilon << n
*/
private static final long serialVersionUID = 7708020456632603947L;
/**
* The various values
*/
private double[] X;
/**
* Weights corresponding to each value. If all the same, weights should have a length of 0
*/
private double[] weights;
/**
* For unweighted data, this is equal to X.length
*/
private double sumOFWeights;
/**
* The bandwidth
*/
private double h;
private double Xmean, Xvar, Xskew;
private KernelFunction k;
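    /**
     * Estimates a bandwidth for the given data using the Gaussian (normal
     * reference) rule of thumb, h = 1.06 * stndDev * n^(-1/5), with simple
     * fall-backs for degenerate inputs.
     * @param X the data points to estimate a bandwidth for
     * @return the estimated bandwidth
     */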
public static double BandwithGuassEstimate(Vec X)
{
if(X.length() == 1 )
return 1;
else if(X.standardDeviation() == 0)
return 1.06 * Math.pow(X.length(), -1.0/5.0);
return 1.06 * X.standardDeviation() * Math.pow(X.length(), -1.0/5.0);
}
/**
     * Automatically selects a good Kernel function for the data set that balances execution time and accuracy
* @param dataPoints
* @return a kernel that will work well for the given distribution
*/
public static KernelFunction autoKernel(Vec dataPoints )
{
if(dataPoints.length() < 30)
return GaussKF.getInstance();
else if(dataPoints.length() < 1000)
return EpanechnikovKF.getInstance();
else//For very large data sets, Uniform is FAST and just as accurate
return UniformKF.getInstance();
}
public KernelDensityEstimator(Vec dataPoints)
{
this(dataPoints, autoKernel(dataPoints));
}
public KernelDensityEstimator(Vec dataPoints, KernelFunction k)
{
this(dataPoints, k, BandwithGuassEstimate(dataPoints));
}
public KernelDensityEstimator(Vec dataPoints, KernelFunction k, double[] weights)
{
this(dataPoints, k, BandwithGuassEstimate(dataPoints), weights);
}
public KernelDensityEstimator(Vec dataPoints, KernelFunction k, double h)
{
setUpX(dataPoints);
this.k = k;
this.h = h;
}
public KernelDensityEstimator(Vec dataPoints, KernelFunction k, double h, double[] weights)
{
setUpX(dataPoints, weights);
this.k = k;
this.h = h;
}
/**
* Copy constructor
*/
private KernelDensityEstimator(double[] X, double h, double Xmean, double Xvar, double Xskew, KernelFunction k, double sumOfWeights, double[] weights)
{
this.X = Arrays.copyOf(X, X.length);
this.h = h;
this.Xmean = Xmean;
this.Xvar = Xvar;
this.Xskew = Xskew;
this.k = k;
this.sumOFWeights = sumOfWeights;
this.weights = Arrays.copyOf(weights, weights.length);
}
private void setUpX(Vec S)
{
Xmean = S.mean();
Xvar = S.variance();
Xskew = S.skewness();
X = S.arrayCopy();
Arrays.sort(X);
sumOFWeights = X.length;
weights = new double[0];
}
private void setUpX(Vec S, double[] weights)
{
if(S.length() != weights.length)
throw new RuntimeException("Weights and variables do not have the same length");
OnLineStatistics stats = new OnLineStatistics();
X = new double[S.length()];
this.weights = Arrays.copyOf(weights, S.length());
//Probability is the X value, match is the weights - so that they can be sorted together.
List<ProbailityMatch<Double>> sorter = new ArrayList<ProbailityMatch<Double>>(S.length());
for(int i = 0; i < S.length(); i++)
sorter.add(new ProbailityMatch<Double>(S.get(i), weights[i]));
Collections.sort(sorter);
for(int i = 0; i < sorter.size(); i++)
{
this.X[i] = sorter.get(i).getProbability();
this.weights[i] = sorter.get(i).getMatch();
stats.add(this.X[i], this.weights[i]);
}
//Now do some helpful preprocessing on weights. We will make index i store the sum for [0, i].
//Each individual weight can still be retrieved in O(1) by accessing a 2nd index and a subtraction
//Methods that need the sum can now access it in O(1) time from the weights array instead of doing an O(n) summations
for(int i = 1; i < this.weights.length; i++)
this.weights[i] += this.weights[i-1];
sumOFWeights = this.weights[this.weights.length-1];
this.Xmean = stats.getMean();
this.Xvar = stats.getVarance();
this.Xskew = stats.getSkewness();
}
private double getWeight(int i)
{
if(weights.length == 0)
return 1.0;
else if(i == 0)
return weights[i];
else
return weights[i] - weights[i-1];
}
@Override
public double pdf(double x)
{
return pdf(x, -1);
}
/**
* Computes the Leave One Out PDF of the estimator
* @param x the value to get the pdf of
* @param j the sorted index of the value to leave. If a negative value is given, the PDF with all values is returned
* @return the pdf with the given index left out
*/
private double pdf(double x, int j)
{
        /*
         * f(x) = 1/(n h) * sum_{i=1}^{n} K( (x - x_i) / h )
         */
//Only values within a certain range will have an effect on the result, so we will skip to that range!
int from = Arrays.binarySearch(X, x-h*k.cutOff());
int to = Arrays.binarySearch(X, x+h*k.cutOff());
        //Most likely the exact value of x is not in the list, so binarySearch returns the insertion points
from = from < 0 ? -from-1 : from;
to = to < 0 ? -to-1 : to;
        //Univariate optimization: with uniform weights and the Uniform kernel, the sum is just the number of in-range elements times 1/2
if(weights.length == 0 && k instanceof UniformKF)
return (to-from)*0.5/ (sumOFWeights*h);
double sum = 0;
for(int i = Math.max(0, from); i < Math.min(X.length, to+1); i++)
if(i != j)
sum += k.k( (x-X[i])/h )*getWeight(i);
return sum / (sumOFWeights * h);
}
@Override
public double cdf(double x)
{
//Only values within a certain range will have an effect on the result, so we will skip to that range!
int from = Arrays.binarySearch(X, x-h*k.cutOff());
int to = Arrays.binarySearch(X, x+h*k.cutOff());
        //Most likely the exact value of x is not in the list, so binarySearch returns the insertion points
from = from < 0 ? -from-1 : from;
to = to < 0 ? -to-1 : to;
double sum = 0;
for(int i = Math.max(0, from); i < Math.min(X.length, to+1); i++)
sum += k.intK( (x-X[i]) /h )*getWeight(i);
/*
         * Slightly different from the pdf: every point below the 'from' index
         * contributes its full weight, since for such points the integration
         * covers their entire range, which by definition is equal to 1.
*/
//We perform the addition after the summation to reduce the difference size
        if(weights.length == 0)//No weights
            sum += Math.max(0, from);
        else if(from > 0)//cumulative weight of everything strictly below the search range
            sum += weights[from-1];
        return sum / sumOFWeights;
}
@Override
public double invCdf(double p)
{
int index;
double kd0;
if(weights.length == 0)
{
double r = p*X.length;
index = (int)r;
double pd0 = r - index, pd1 = 1-pd0;
kd0 = k.intK(pd1);
}
else//CDF can be found from the weights summings
{
double XEstimate = p*sumOFWeights;
index = Arrays.binarySearch(weights, XEstimate);
index = index < 0 ? -index-1 : index;
if(X[index] != 0)//TODO fix this bit
kd0 = 1.0;//-Math.abs((XEstimate-X[index])/X[index]);
else
kd0 = 1.0;
}
if(index == X.length-1)//at the tail end
return X[index]*kd0;
double x = X[index]*kd0 + X[index+1]*(1-kd0);
return x;
}
@Override
public double min()
{
return X[0]-h;
}
@Override
public double max()
{
return X[X.length-1]+h;
}
@Override
public String getDistributionName()
{
return "Kernel Density Estimate";
}
@Override
public String[] getVariables()
{
return new String[] { "h" } ;
}
@Override
public double[] getCurrentVariableValues()
{
return new double[] { h };
}
/**
* Sets the bandwidth used for smoothing. Higher values make the pdf smoother, but can
     * obscure features. Too small a bandwidth will cause spikes at only the data points.
* @param val new bandwidth
*/
public void setBandwith(double val)
{
if(val <= 0 || Double.isInfinite(val))
throw new ArithmeticException("Bandwith parameter h must be greater than zero, not " + 0);
this.h = val;
}
/**
*
* @return the bandwidth parameter
*/
public double getBandwith()
{
return h;
}
@Override
public void setVariable(String var, double value)
{
if(var.equals("h"))
setBandwith(value);
}
@Override
public KernelDensityEstimator clone()
{
return new KernelDensityEstimator(X, h, Xmean, Xvar, Xskew, k, sumOFWeights, weights);
}
@Override
public void setUsingData(Vec data)
{
setUpX(data);
this.h = BandwithGuassEstimate(data);
}
@Override
public double mean()
{
return Xmean;
}
@Override
public double mode()
{
double maxP = 0, pTmp;
double maxV = Double.NaN;
for(int i = 0; i < X.length; i++)
if( (pTmp = pdf(X[i]) ) > maxP)
{
maxP = pTmp;
maxV = X[i];
}
return maxV;
}
@Override
public double variance()
{
return Xvar + h*h*k.k2();
}
@Override
public double skewness()
{
//TODO cant find anything about what this should really be...
return Xskew;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + Arrays.hashCode(X);
long temp;
temp = Double.doubleToLongBits(Xmean);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(Xskew);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(Xvar);
result = prime * result + (int) (temp ^ (temp >>> 32));
temp = Double.doubleToLongBits(h);
result = prime * result + (int) (temp ^ (temp >>> 32));
result = prime * result + ((k == null) ? 0 : k.hashCode());
temp = Double.doubleToLongBits(sumOFWeights);
result = prime * result + (int) (temp ^ (temp >>> 32));
result = prime * result + Arrays.hashCode(weights);
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof KernelDensityEstimator)) {
return false;
}
KernelDensityEstimator other = (KernelDensityEstimator) obj;
if (Double.doubleToLongBits(Xmean) != Double
.doubleToLongBits(other.Xmean)) {
return false;
}
if (Double.doubleToLongBits(Xskew) != Double
.doubleToLongBits(other.Xskew)) {
return false;
}
if (Double.doubleToLongBits(Xvar) != Double
.doubleToLongBits(other.Xvar)) {
return false;
}
if (Double.doubleToLongBits(h) != Double.doubleToLongBits(other.h)) {
return false;
}
if (Double.doubleToLongBits(sumOFWeights) != Double
.doubleToLongBits(other.sumOFWeights)) {
return false;
}
if (k == null) {
if (other.k != null) {
return false;
}
} else if (k.getClass()!=other.k.getClass()) {
return false;
}
if (!Arrays.equals(X, other.X)) {
return false;
}
if (!Arrays.equals(weights, other.weights)) {
return false;
}
return true;
}
}
| 13,373 | 28.26477 | 154 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/empirical/kernelfunc/BiweightKF.java |
package jsat.distributions.empirical.kernelfunc;
/**
*
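 * The Biweight (quartic) kernel, k(u) = 15/16 (1 - u<sup>2</sup>)<sup>2</sup> for
 * |u| ≤ 1 and 0 otherwise.
 *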
* @author Edward Raff
*/
public class BiweightKF implements KernelFunction
{
private static final long serialVersionUID = -7199542934997154186L;
private BiweightKF()
{
}
private static class SingletonHolder
{
public static final BiweightKF INSTANCE = new BiweightKF();
}
/**
* Returns the singleton instance of this class
* @return the instance of this class
*/
public static BiweightKF getInstance()
{
return SingletonHolder.INSTANCE;
}
@Override
public double k(double u)
{
if(Math.abs(u) > 1)
return 0;
return Math.pow(1-u*u, 2)*(15.0/16.0);
}
@Override
public double intK(double u)
{
if(u < -1)
return 0;
if(u > 1)
return 1;
return Math.pow(u+1, 3)/16.0 * (3*u*u - 9*u + 8);
}
@Override
public double k2()
{
return 1.0/7.0;
}
@Override
public double cutOff()
{
return Math.ulp(1)+1;
}
@Override
public double kPrime(double u)
{
if(Math.abs(u) > 1)
return 0;
return (15.0/4.0)*u*(u*u-1);
}
@Override
public String toString()
{
return "Biweight Kernel";
}
}
| 1,326 | 16.233766 | 68 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/empirical/kernelfunc/EpanechnikovKF.java |
package jsat.distributions.empirical.kernelfunc;
/**
*
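 * The Epanechnikov kernel, k(u) = 3/4 (1 - u<sup>2</sup>) for |u| ≤ 1 and 0 otherwise.
 *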
* @author Edward Raff
*/
public class EpanechnikovKF implements KernelFunction
{
private static final long serialVersionUID = 8688942176576932932L;
private EpanechnikovKF()
{
}
private static class SingletonHolder
{
public static final EpanechnikovKF INSTANCE = new EpanechnikovKF();
}
/**
* Returns the singleton instance of this class
* @return the instance of this class
*/
public static EpanechnikovKF getInstance()
{
return SingletonHolder.INSTANCE;
}
@Override
public double k(double u)
{
if(Math.abs(u) > 1)
return 0;
return (1-u*u)*(3.0/4.0);
}
@Override
public double intK(double u)
{
if(u < -1)
return 0;
if( u > 1)
return 1;
return (-u*u*u + 3 *u + 2)/4;
}
@Override
public double k2()
{
return 1.0/5.0;
}
@Override
public double cutOff()
{
return Math.ulp(1)+1;
}
@Override
public double kPrime(double u)
{
if(Math.abs(u) > 1)
return 0;
return - u *(3.0/2.0);
}
@Override
public String toString()
{
return "Epanechnikov Kernel";
}
}
| 1,309 | 16.236842 | 75 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/empirical/kernelfunc/GaussKF.java |
package jsat.distributions.empirical.kernelfunc;
import static java.lang.Math.*;
import jsat.distributions.Normal;
/**
*
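 * The Gaussian kernel, k(u) = the standard normal density exp(-u<sup>2</sup>/2)/√(2π),
 * with unbounded support.
 *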
* @author Edward Raff
*/
public class GaussKF implements KernelFunction
{
private static final long serialVersionUID = -6765390012694573184L;
private GaussKF()
{
}
private static class SingletonHolder
{
public static final GaussKF INSTANCE = new GaussKF();
}
/**
* Returns the singleton instance of this class
* @return the instance of this class
*/
public static GaussKF getInstance()
{
return SingletonHolder.INSTANCE;
}
@Override
public double k(double u)
{
return Normal.pdf(u, 0, 1);
}
@Override
public double intK(double u)
{
return Normal.cdf(u, 0, 1);
}
@Override
public double k2()
{
return 1;
}
@Override
public double cutOff()
{
/*
         * This is not technically correct, as the value of k(u) here is still 7.998827757006813E-38.
         * However, this is very close to zero, and is so small that k(u)+x = x for most values of x.
         * Unless the probability is going to be near zero, values past this point will have
         * no effect on the result
*/
return 13;
}
@Override
public double kPrime(double u)
{
return -exp(-pow(u, 2)/2)*u/sqrt(2 * PI);
}
@Override
public String toString()
{
return "Gaussian Kernel";
}
}
| 1,515 | 18.947368 | 104 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/empirical/kernelfunc/KernelFunction.java |
package jsat.distributions.empirical.kernelfunc;
import java.io.Serializable;
/**
* Class for representing one dimensional kernel functions. Since they require
 * no parameters and have no need for duplication, it is advised to make
* them singletons.
*
* See http://en.wikipedia.org/wiki/Kernel_(statistics)
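 * <br><br>
 * Implementations in this package include {@link GaussKF}, {@link EpanechnikovKF},
 * {@link BiweightKF}, {@link TriweightKF}, and {@link UniformKF}.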
*
* @author Edward Raff
*/
public interface KernelFunction extends Serializable
{
/**
* Returns the weight to be applied to a sample for the normalized distance of two data points.
* @param u the distance of the data points
* @return the value in [0, 1) of the amount of weight to give to the sample based on its distance
*/
public double k(double u);
/**
* Computes the value of the finite integral from -Infinity up to the value u, of the function given by {@link #k(double) }
* @param u the distance of the data points
* @return the value of the integration
*/
public double intK(double u);
/**
*
* Returns the value of the derivative at a point, k'(u)
* @param u the distance of the data points
* @return the value of the derivative at <tt>u</tt>
*/
public double kPrime(double u);
/**
* Returns the variance of the kernel function
* @return the variance of the kernel function
*/
public double k2();
/**
* As the value of |u| for the kernel function approaches infinity, the
* value of k(u) approaches zero. This function returns the minimal
* absolute value of u for which k(u) returns 0
*
* @return the first value for which k(u) = 0
*/
public double cutOff();
/**
* Returns the name of this kernel function
* @return the name of this kernel function
*/
@Override
public String toString();
}
| 1,827 | 29.466667 | 127 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/empirical/kernelfunc/TriweightKF.java |
package jsat.distributions.empirical.kernelfunc;
/**
*
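 * The Triweight kernel, k(u) = 35/32 (1 - u<sup>2</sup>)<sup>3</sup> for |u| ≤ 1
 * and 0 otherwise.
 *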
* @author Edward Raff
*/
public class TriweightKF implements KernelFunction
{
private static final long serialVersionUID = -9156392658970318676L;
private TriweightKF()
{
}
private static class SingletonHolder
{
public static final TriweightKF INSTANCE = new TriweightKF();
}
/**
* Returns the singleton instance of this class
* @return the instance of this class
*/
public static TriweightKF getInstance()
{
return SingletonHolder.INSTANCE;
}
@Override
public double k(double u)
{
if(Math.abs(u) > 1)
return 0;
return Math.pow(1 - u*u, 3)*(35.0/32.0);
}
@Override
public double intK(double u)
{
if(u < -1)
return 0;
if(u > 1)
return 1;
return (-5*Math.pow(u, 7) + 21*Math.pow(u, 5) - 35 * Math.pow(u, 3) + 35 *u + 16)/32;
}
@Override
public double k2()
{
return 1.0/9.0;
}
@Override
public double cutOff()
{
return Math.ulp(1)+1;
}
@Override
public double kPrime(double u)
{
if(Math.abs(u) > 1)
return 0;
        return -(105.0/16.0)*u*(1-u*u)*(1-u*u);//derivative of 35/32 (1-u^2)^3
}
@Override
public String toString()
{
return "Triweight Kernel";
}
}
| 1,354 | 16.828947 | 93 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/empirical/kernelfunc/UniformKF.java |
package jsat.distributions.empirical.kernelfunc;
/**
*
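 * The Uniform (boxcar) kernel, k(u) = 1/2 for |u| ≤ 1 and 0 otherwise.
 *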
* @author Edward Raff
*/
public class UniformKF implements KernelFunction
{
private static final long serialVersionUID = -6413579643511350896L;
private UniformKF()
{
}
private static class SingletonHolder
{
public static final UniformKF INSTANCE = new UniformKF();
}
/**
* Returns the singleton instance of this class
* @return the instance of this class
*/
public static UniformKF getInstance()
{
return SingletonHolder.INSTANCE;
}
@Override
public double k(double u)
{
if(Math.abs(u) > 1)
return 0;
return 0.5;
}
@Override
public double intK(double u)
{
if(u < -1)
return 0;
if (u > 1)
return 1;
return (u+1)/2;
}
@Override
public double k2()
{
return 1.0/3.0;
}
@Override
public double cutOff()
{
return Math.ulp(1)+1;
}
@Override
public double kPrime(double u)
{
return 0;
}
@Override
public String toString()
{
return "Uniform Kernel";
}
}
| 1,189 | 15.081081 | 68 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/BaseKernelTrick.java |
package jsat.distributions.kernels;
import java.util.List;
import jsat.linear.Vec;
/**
* This provides a simple base implementation for the cache related methods in
* Kernel Trick. By default they will all call
* {@link #eval(jsat.linear.Vec, jsat.linear.Vec) } directly. For this reason
* {@link #supportsAcceleration() } defaults to returning false. If the Kernel
* supports cache acceleration, {@link #evalSum(java.util.List, java.util.List,
* double[], jsat.linear.Vec, int, int) } will make use of the acceleration.
 * All other methods must be overridden.
*
* @author Edward Raff
*/
public abstract class BaseKernelTrick implements KernelTrick
{
private static final long serialVersionUID = 7230585838672226751L;
@Override
public boolean supportsAcceleration()
{
return false;
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> trainingSet)
{
return null;
}
@Override
public List<Double> getQueryInfo(Vec q)
{
return null;
}
@Override
public void addToCache(Vec newVec, List<Double> cache)
{
}
@Override
public double eval(int a, int b, List<? extends Vec> trainingSet, List<Double> cache)
{
return eval(trainingSet.get(a), trainingSet.get(b));
}
@Override
public double eval(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
return eval(vecs.get(a), b);
}
@Override
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, int start, int end)
{
return evalSum(finalSet, cache, alpha, y, getQueryInfo(y), start, end);
}
@Override
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, List<Double> qi, int start, int end)
{
double sum = 0;
for(int i = start; i < end; i++)
sum += alpha[i] * eval(i, y, qi, finalSet, cache);
return sum;
}
@Override
abstract public KernelTrick clone();
@Override
public boolean normalized()
{
return false;
}
}
| 2,196 | 25.154762 | 135 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/BaseL2Kernel.java | package jsat.distributions.kernels;
import java.util.List;
import jsat.linear.Vec;
import jsat.parameters.Parameter;
import jsat.utils.DoubleList;
/**
 * Many Kernels can be described in terms of the L2 norm with some operations
* performed on it. For example, the {@link RBFKernel} and
* {@link RationalQuadraticKernel} can both be expressed in terms of the
* L<sub>2</sub> norm of the two inputs. To simplify the addition of other
* kernels based on the same norm, this base class exists.<br>
* <br>
 * All Kernels extending this base kernel support acceleration. This is done by making use of the fact that
 * || x - y ||<sup>2</sup> = x·x + y·y - 2 x·y <br>
* Thus the cached value for each vector is its self dot product.
* @author Edward Raff
*/
public abstract class BaseL2Kernel implements KernelTrick
{
private static final long serialVersionUID = 2917497058710848085L;
@Override
abstract public double eval(Vec a, Vec b);
@Override
abstract public KernelTrick clone();
@Override
public boolean supportsAcceleration()
{
return true;
}
/**
* Returns the squared L<sup>2</sup> norm between two points from the cache values.
* @param i the first index in the vector list
* @param j the second index in the vector list
* @param vecs the list of vectors that make the collection
* @param cache the cache of values for each vector in the collection
* @return the squared norm ||x<sub>i</sub>-x<sub>j</sub>||<sup>2</sup>
*/
protected double getSqrdNorm(int i, int j, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return Math.pow(vecs.get(i).pNormDist(2.0, vecs.get(j)), 2);
return cache.get(i)+cache.get(j)-2*vecs.get(i).dot(vecs.get(j));
}
/**
* Returns the squared L<sup>2</sup> norm of the given point from the cache
* @param i the index in the vector list to get the squared norm from
* @param vecs the list of vectors that make the collection
* @param cache the cache of values for each vector in the collection
* @return the squared norm ||x<sub>i</sub>||<sup>2</sup>
*/
protected double getSqrdNorm(int i, List<? extends Vec> vecs, List<Double> cache)
{
return cache.get(i);
}
/**
* Returns the squared L<sup>2</sup> norm between a point in the cache and one with a provided qi value
* @param i the index in the vector list
* @param y the other vector
* @param qi the acceleration values for the other vector
* @param vecs the list of vectors to make the collection
* @param cache the cache of values for each vector in the collection
* @return the squared norm ||x<sub>i</sub>-y||<sup>2</sup>
*/
protected double getSqrdNorm(int i, Vec y, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return Math.pow(vecs.get(i).pNormDist(2.0, y), 2);
return cache.get(i)+qi.get(0)-2*vecs.get(i).dot(y);
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> trainingSet)
{
DoubleList cache = new DoubleList(trainingSet.size());
for(int i = 0; i < trainingSet.size(); i++)
cache.add(trainingSet.get(i).dot(trainingSet.get(i)));
return cache;
}
@Override
public List<Double> getQueryInfo(Vec q)
{
DoubleList dl = new DoubleList(1);
dl.add(q.dot(q));
return dl;
}
@Override
public void addToCache(Vec newVec, List<Double> cache)
{
cache.add(newVec.dot(newVec));
}
@Override
abstract public double eval(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache);
@Override
abstract public double eval(int a, int b, List<? extends Vec> trainingSet, List<Double> cache);
@Override
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, int start, int end)
{
return evalSum(finalSet, cache, alpha, y, getQueryInfo(y), start, end);
}
@Override
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, List<Double> qi, int start, int end)
{
double sum = 0;
for(int i = start; i < end; i++)
if(alpha[i] != 0.0)
sum += alpha[i] * eval(i, y, qi, finalSet, cache);
return sum;
}
}
| 4,473 | 34.228346 | 135 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/DistanceMetricBasedKernel.java | package jsat.distributions.kernels;
import java.util.List;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.parameters.Parameter;
import jsat.parameters.Parameter.ParameterHolder;
/**
* This abstract class provides the means of implementing a Kernel based off
* some {@link DistanceMetric}. This will pre-implement most of the methods of
* the KernelTrick interface, including using the distance acceleration of the
* metric (if supported) when appropriate.
*
* @author Edward Raff
*/
public abstract class DistanceMetricBasedKernel implements KernelTrick
{
private static final long serialVersionUID = 8395066824809874527L;
/**
* the distance metric to use for the Kernel
*/
@ParameterHolder
protected DistanceMetric d;
/**
     * Creates a new distance based kernel
* @param d the distance metric to use
*/
public DistanceMetricBasedKernel(DistanceMetric d)
{
this.d = d;
}
@Override
abstract public KernelTrick clone();
@Override
public boolean supportsAcceleration()
{
return d.supportsAcceleration();
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> trainingSet)
{
return d.getAccelerationCache(trainingSet);
}
@Override
public List<Double> getQueryInfo(Vec q)
{
return d.getQueryInfo(q);
}
@Override
public void addToCache(Vec newVec, List<Double> cache)
{
cache.addAll(d.getQueryInfo(newVec));
}
@Override
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, int start, int end)
{
return evalSum(finalSet, cache, alpha, y, d.getQueryInfo(y), start, end);
}
@Override
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, List<Double> qi, int start, int end)
{
double sum = 0;
for (int i = start; i < end; i++)
if (alpha[i] != 0)
sum += alpha[i] * eval(i, y, qi, finalSet, cache);
return sum;
}
}
| 2,127 | 25.271605 | 135 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/GeneralRBFKernel.java | package jsat.distributions.kernels;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.distributions.Distribution;
import jsat.distributions.LogUniform;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.utils.DoubleList;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
/**
* This class provides a generalization of the {@link RBFKernel} to arbitrary
* {@link DistanceMetric distance metrics}, and is of the form
* <i>exp(-d(x, y)<sup>2</sup>/(2 {@link #setSigma(double) σ}<sup>2</sup>
* ))</i>. So long as the distance metric is valid, the resulting kernel trick
* will be a valid kernel. <br>
* <br>
* If the {@link EuclideanDistance} is used, then this becomes equivalent to the
* {@link RBFKernel}. <br>
* <br>
 * Note that since the {@link KernelTrick} has no concept of training, the
 * distance metric can not require training either. A pre-trained metric is
 * still admissible though.
*
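 * A minimal usage sketch (the width and the vectors {@code a} and {@code b} are
 * arbitrary stand-ins):
 * <pre>{@code
 * GeneralRBFKernel kern = new GeneralRBFKernel(new EuclideanDistance(), 0.75);
 * double sim = kern.eval(a, b); // in (0, 1], equal to 1 only when d(a, b) = 0
 * }</pre>
 *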
* @author Edward Raff
*/
public class GeneralRBFKernel extends DistanceMetricBasedKernel
{
private static final long serialVersionUID = 1368225926995372017L;
private double sigma;
private double sigmaSqrd2Inv;
/**
* Creates a new Generic RBF Kernel
* @param d the distance metric to use
* @param sigma the standard deviation to use
*/
public GeneralRBFKernel(DistanceMetric d, double sigma)
{
super(d);
setSigma(sigma);
}
/**
* Sets the kernel width parameter, which must be a positive value. Larger
* values indicate a larger width
*
* @param sigma the sigma value
*/
public void setSigma(double sigma)
{
if(sigma <= 0 || Double.isNaN(sigma) || Double.isInfinite(sigma))
throw new IllegalArgumentException("Sigma must be a positive constant, not " + sigma);
this.sigma = sigma;
this.sigmaSqrd2Inv = 0.5/(sigma*sigma);
}
/**
*
* @return the width parameter to use for the kernel
*/
public double getSigma()
{
return sigma;
}
@Override
public KernelTrick clone()
{
return new GeneralRBFKernel(d.clone(), sigma);
}
@Override
public double eval(Vec a, Vec b)
{
double dist = d.dist(a, b);
return Math.exp(-dist*dist * sigmaSqrd2Inv);
}
@Override
public double eval(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
double dist = d.dist(a, b, qi, vecs, cache);
return Math.exp(-dist*dist * sigmaSqrd2Inv);
}
@Override
public double eval(int a, int b, List<? extends Vec> vecs, List<Double> cache)
{
double dist = d.dist(a, b, vecs, cache);
return Math.exp(-dist*dist * sigmaSqrd2Inv);
}
/**
* Guess the distribution to use for the kernel width term
* {@link #setSigma(double) σ} in the General RBF kernel.
*
* @param d the data set to get the guess for
* @return the guess for the σ parameter in the General RBF Kernel
*/
public Distribution guessSigma(DataSet d)
{
return guessSigma(d, this.d);
}
/**
* Guess the distribution to use for the kernel width term
* {@link #setSigma(double) σ} in the General RBF kernel.
*
* @param d the data set to get the guess for
* @param dist the distance metric to assume is being used in the kernel
* @return the guess for the σ parameter in the General RBF Kernel
*/
public static Distribution guessSigma(DataSet d, DistanceMetric dist)
{
        //we will use a simple strategy of estimating a typical sigma to test, based on the pairwise distances of random points
//to avoid n^2 work for this, we will use a sqrt(n) sized sample as n increases so that we only do O(n) work
List<Vec> allVecs = d.getDataVectors();
int toSample = d.size();
if (toSample > 5000)
toSample = 5000 + (int) Math.floor(Math.sqrt(d.size() - 5000));
DoubleList vals = new DoubleList(toSample*toSample);
if (d instanceof ClassificationDataSet && ((ClassificationDataSet) d).getPredicting().getNumOfCategories() == 2)
{
ClassificationDataSet cdata = (ClassificationDataSet) d;
List<Vec> class0 = new ArrayList<Vec>(toSample / 2);
List<Vec> class1 = new ArrayList<Vec>(toSample / 2);
IntList randOrder = new IntList(d.size());
ListUtils.addRange(randOrder, 0, d.size(), 1);
Collections.shuffle(randOrder);
            //collect a random sample of data
for (int i = 0; i < randOrder.size(); i++)
{
int indx = randOrder.getI(i);
if (cdata.getDataPointCategory(indx) == 0 && class0.size() < toSample / 2)
class0.add(cdata.getDataPoint(indx).getNumericalValues());
                else if (cdata.getDataPointCategory(indx) == 1 && class1.size() < toSample / 2)
class1.add(cdata.getDataPoint(indx).getNumericalValues());
}
int j_start = class0.size();
class0.addAll(class1);
List<Double> cache = dist.getAccelerationCache(class0);
for (int i = 0; i < j_start; i++)
for (int j = j_start; j < class0.size(); j++)
vals.add(dist.dist(i, j, allVecs, cache));
}
else
{
Collections.shuffle(allVecs);
if (d.size() > 5000)
allVecs = allVecs.subList(0, toSample);
List<Double> cache = dist.getAccelerationCache(allVecs);
for (int i = 0; i < allVecs.size(); i++)
for (int j = i + 1; j < allVecs.size(); j++)
vals.add(dist.dist(i, j, allVecs, cache));
}
Collections.sort(vals);
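        //center the search range on the median pairwise distance (the "median heuristic"), spanning a factor of e^4 above and below it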
double median = vals.get(vals.size()/2);
return new LogUniform(Math.exp(Math.log(median)-4), Math.exp(Math.log(median)+4));
}
@Override
public boolean normalized()
{
return true;
}
}
| 6,308 | 33.47541 | 126 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/KernelPoint.java | package jsat.distributions.kernels;
import static java.lang.Math.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import jsat.classifiers.linear.kernelized.Projectron;
import jsat.linear.*;
import jsat.math.FastMath;
import jsat.math.Function;
import jsat.math.Function1D;
import jsat.math.optimization.GoldenSearch;
import jsat.regression.KernelRLS;
import jsat.utils.DoubleList;
import jsat.utils.ListUtils;
import jsat.utils.random.RandomUtil;
/**
* The Kernel Point represents a kernelized weight vector by a linear
* combination of vectors transformed through a
 * {@link KernelTrick kernel function}. This implementation allows the selection
* of multiple different budget maintenance strategies <br>
* <br>
* See {@link KernelRLS} and {@link Projectron} for methods and papers based on
* the same ideas used to create this class. <br>
* Credit goes to Davis King of the <a href="http://dlib.net/ml.html">dlib
* library</a> for the idea of this type of class. <br>
* <br>
* Changing the
* {@link #setBudgetStrategy(jsat.distributions.kernels.KernelPoint.BudgetStrategy)
 * budget maintenance method} or other parameters should be done <i>before</i>
* adding any data points to the KernelPoint. <br>
* If a maximum budget is specified, it may always be increased - but may not be
* decreased.
*
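 * A minimal usage sketch (the kernel, its parameters, and the vectors {@code x}
 * and {@code q} are arbitrary stand-ins):
 * <pre>{@code
 * KernelPoint w = new KernelPoint(new RBFKernel(0.5), 0.05);
 * w.setBudgetStrategy(KernelPoint.BudgetStrategy.MERGE_RBF);
 * w.setMaxBudget(100);
 * w.mutableAdd(1.0, x);     // accumulate x with coefficient 1.0
 * double score = w.dot(q);  // kernelized dot product against a query vector
 * }</pre>
 *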
* @author Edward Raff
*/
public class KernelPoint
{
protected KernelTrick k;
private double errorTolerance;
protected List<Vec> vecs;
protected List<Double> kernelAccel;
protected Matrix K;
protected Matrix InvK;
protected Matrix KExpanded;
protected Matrix InvKExpanded;
protected DoubleList alpha;
protected BudgetStrategy budgetStrategy = BudgetStrategy.PROJECTION;
protected int maxBudget = Integer.MAX_VALUE;
/**
* These enums control the method used to reduce the size of the support
* vector set in the kernel point.
*/
public enum BudgetStrategy
{
/**
* The budget is maintained by projecting the incoming vector onto
* the set of current vectors. If the error in the projection is less
* than {@link #setErrorTolerance(double) } the projection is used, and
* the input is added to the support vector set if the error was too
* large. <br>
* Once the maximum budget size is reached, the projection is used
* regardless of the error of the projection. <br>
* <br>
* The time complexity of each update is <i>O(B<sup>2</sup>)</i> and
* uses <i>O(B<sup>2</sup>)</i> memory.
*
*/
PROJECTION,
/**
* The budget is maintained by merging two support vectors to minimize
* the error in the squared norm. The merged support vector is not a
* member of the training set. <b>This method is only valid for the
* {@link RBFKernel} </b>. Using any other kernel may cause invalid
* results<br>
* <br>
* See:<br>
* <ul>
* <li>Wang, Z., Crammer, K.,&Vucetic, S. (2012). <i>Breaking the
* Curse of Kernelization : Budgeted Stochastic Gradient Descent for
* Large-Scale SVM Training</i>. The Journal of Machine Learning
* Research, 13(1), 3103–3131.</li>
* <li>Wang, Z., Crammer, K.,&Vucetic, S. (2010). <i>Multi-class
* pegasos on a budget</i>. In 27th International Conference on Machine
* Learning (pp. 1143–1150). Retrieved from
* <a href="http://www.ist.temple.edu/~vucetic/documents/wang10icml.pdf">
* here</a></li>
* </ul>
* <br>
* The time complexity of each update is <i>O(B)</i> and
* uses <i>O(B)</i> memory.
*/
MERGE_RBF,
/**
* The budget is maintained by refusing to add new data points once the
* budget is reached. <br>
* <br>
* The time complexity of each update is <i>O(B)</i> and
* uses <i>O(B)</i> memory.
*/
STOP,
/**
* The budget is maintained by randomly dropping a previous support
* vector. <br>
* <br>
* The time complexity of each update is <i>O(B)</i> and
* uses <i>O(B)</i> memory.
*/
RANDOM,
}
//Internal structure
private double sqrdNorm = 0;
private boolean normGood = true;
/**
* Creates a new Kernel Point, which is a point in the kernel space
* represented by an accumulation of vectors and uses the
* {@link BudgetStrategy#PROJECTION} strategy with an unbounded maximum
* budget
*
* @param k the kernel to use
* @param errorTolerance the maximum error in [0, 1] allowed for projecting
* a vector instead of adding it to the basis set
*/
public KernelPoint(KernelTrick k, double errorTolerance)
{
this.k = k;
setErrorTolerance(errorTolerance);
setBudgetStrategy(BudgetStrategy.PROJECTION);
setMaxBudget(Integer.MAX_VALUE);
if(k.supportsAcceleration())
kernelAccel = new DoubleList(16);
alpha = new DoubleList(16);
vecs = new ArrayList<Vec>(16);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public KernelPoint(KernelPoint toCopy)
{
this.k = toCopy.k.clone();
this.errorTolerance = toCopy.errorTolerance;
if(toCopy.vecs != null)
{
this.vecs = new ArrayList<Vec>(toCopy.vecs.size());
for(Vec v : toCopy.vecs)
this.vecs.add(v.clone());
if(toCopy.kernelAccel != null)
this.kernelAccel = new DoubleList(toCopy.kernelAccel);
this.alpha = new DoubleList(toCopy.alpha);
}
if(toCopy.KExpanded != null)
{
this.KExpanded = toCopy.KExpanded.clone();
this.InvKExpanded = toCopy.InvKExpanded.clone();
this.K = new SubMatrix(KExpanded, 0, 0, toCopy.K.rows(), toCopy.K.cols());
this.InvK = new SubMatrix(InvKExpanded, 0, 0, toCopy.InvK.rows(), toCopy.InvK.rows());
}
this.maxBudget = toCopy.maxBudget;
this.sqrdNorm = toCopy.sqrdNorm;
this.normGood = toCopy.normGood;
}
/**
* Sets the maximum budget for support vectors to allow. Setting to
* {@link Integer#MAX_VALUE} is essentially an unbounded number of support
* vectors. Increasing the budget after adding the first vector is always
     * allowed, but it may not be possible to reduce the budget if the number of
     * current support vectors is already above the desired budget.
*
* @param maxBudget the maximum number of allowed support vectors
*/
public void setMaxBudget(int maxBudget)
{
if(maxBudget < 1)
throw new IllegalArgumentException("Budget must be positive, not " + maxBudget);
this.maxBudget = maxBudget;
}
/**
* Returns the current maximum budget for support vectors
* @return the maximum budget for support vectors
*/
public int getMaxBudget()
{
return maxBudget;
}
/**
* Sets the method used for maintaining the budget of support vectors. This
* method must be called <i>before</i> any vectors are added to the
* KernelPoint. <br>
* <br>
* The budget maintenance strategy used controls the time complexity and
* memory use of the model.
* @param budgetStrategy the budget maintenance strategy
*/
public void setBudgetStrategy(BudgetStrategy budgetStrategy)
{
if(getBasisSize() > 0)
throw new RuntimeException("KerenlPoint already started, budget may not be changed");
this.budgetStrategy = budgetStrategy;
}
/**
* Returns the budget method used
* @return the budget method used
*/
public BudgetStrategy getBudgetStrategy()
{
return budgetStrategy;
}
/**
* Sets the error tolerance used for projection maintenance strategies such
* as {@link BudgetStrategy#PROJECTION}
* @param errorTolerance the error tolerance in [0, 1]
*/
public void setErrorTolerance(double errorTolerance)
{
if(Double.isNaN(errorTolerance) || errorTolerance < 0 || errorTolerance > 1)
throw new IllegalArgumentException("Error tolerance must be in [0, 1], not " + errorTolerance);
this.errorTolerance = errorTolerance;
}
/**
* Returns the error tolerance that is used depending on the
* {@link BudgetStrategy} in use
* @return the error tolerance value
*/
public double getErrorTolerance()
{
return errorTolerance;
}
/**
     * Returns the squared value of the 2 norm of the point this object
* represents
*
* @return the squared value of the 2 norm
*/
public double getSqrdNorm()
{
if(!normGood)
{
sqrdNorm = 0;
for(int i = 0; i < alpha.size(); i++)
{
if(K != null)//we already know all the values of K
{
sqrdNorm += alpha.get(i)*alpha.get(i)*K.get(i, i);
for(int j = i+1; j < alpha.size(); j++)
sqrdNorm += 2*alpha.get(i)*alpha.get(j)*K.get(i, j);
}
else//nope, compute as needed
{
sqrdNorm += alpha.get(i)*alpha.get(i)*k.eval(i, i, vecs, kernelAccel);
for(int j = i+1; j < alpha.size(); j++)
sqrdNorm += 2*alpha.get(i)*alpha.get(j)*k.eval(i, j, vecs, kernelAccel);
}
}
normGood = true;
}
return sqrdNorm;
}
/**
* Computes the dot product between the kernel point this object represents
* and the given input vector in the kernel space.
*
* @param x the input vector to work with
* @return the dot product in the kernel space between this point and {@code x}
*/
public double dot(Vec x)
{
return dot(x, k.getQueryInfo(x));
}
/**
* Computes the dot product between the kernel point this object represents
* and the given input vector in the kernel space
*
* @param x the input vector to work with
* @param qi the query information for the vector, or {@code null} only if
* the kernel in use does not support acceleration.
* @return the dot product in the kernel space between this point and {@code x}
*/
public double dot(Vec x, List<Double> qi)
{
if(getBasisSize() == 0)
return 0;
return k.evalSum(vecs, kernelAccel, alpha.getBackingArray(), x, qi, 0, alpha.size());
}
/**
* Returns the dot product between this point and another in the kernel
* space
* @param x the point to take the dot product with
* @return the dot product in the kernel space between this point and {@code x}
*/
public double dot(KernelPoint x)
{
if(getBasisSize() == 0 || x.getBasisSize() == 0)
return 0;
int shift = this.alpha.size();
List<Vec> mergedVecs = ListUtils.mergedView(this.vecs, x.vecs);
List<Double> mergedCache;
if(this.kernelAccel == null || x.kernelAccel == null)
mergedCache = null;
else
mergedCache = ListUtils.mergedView(this.kernelAccel, x.kernelAccel);
double dot = 0;
for(int i = 0; i < this.alpha.size(); i++)
for(int j = 0; j < x.alpha.size(); j++)
{
dot += this.alpha.get(i)*x.alpha.get(j)*k.eval(i, j+shift, mergedVecs, mergedCache);
}
return dot;
}
/**
* Computes the Euclidean distance between this kernel point and the given
* input in the kernel space
* @param x the input vector to work with
* @return the Euclidean distance between this point and {@code x} in the
* kernel space
*/
public double dist(Vec x)
{
return dist(x, k.getQueryInfo(x));
}
/**
* Computes the Euclidean distance between this kernel point and the given
* input in the kernel space
* @param x the input vector to work with
* @param qi the query information for the vector, or {@code null} only if
* the kernel in use does not support acceleration.
* @return the Euclidean distance between this point and {@code x} in the
* kernel space
*/
public double dist(Vec x, List<Double> qi)
{
double k_xx = k.eval(0, 0, Arrays.asList(x), qi);
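        //dist(w, x)^2 = k(x, x) + <w, w> - 2 <w, x>, all evaluated in the kernel space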
return Math.sqrt(k_xx+getSqrdNorm()-2*dot(x, qi));
}
/**
* Computes the Euclidean distance between this kernel point and the given
* kernel point in the kernel space
* @param x the input point to work with
* @return the Euclidean distance between this point and {@code x} in the
* kernel space
*/
public double dist(KernelPoint x)
{
if(this == x)//dist to self is 0
return 0;
double d = this.getSqrdNorm() + x.getSqrdNorm() - 2 * dot(x);
        return Math.sqrt(Math.max(0, d));//Avoid rare cases where 2*dot might be slightly larger
}
/**
* Alters this point to be multiplied by the given value
* @param c the value to multiply by
*/
public void mutableMultiply(double c)
{
if(Double.isNaN(c) || Double.isInfinite(c))
throw new IllegalArgumentException("multiplier must be a real value, not " + c);
if(getBasisSize() == 0)
return;
sqrdNorm *= c*c;
alpha.getVecView().mutableMultiply(c);
}
/**
* Alters this point to contain the given input vector as well
* @param x_t the vector to add
*/
public void mutableAdd(Vec x_t)
{
mutableAdd(1.0, x_t);
}
/**
* Alters this point to contain the given input vector as well
* @param c the multiplicative constant to apply with the vector
* @param x_t the vector to add
*/
public void mutableAdd(double c, Vec x_t)
{
mutableAdd(c, x_t, k.getQueryInfo(x_t));
}
/**
* Alters this point to contain the given input vector as well
* @param c the multiplicative constant to apply with the vector
* @param x_t the vector to add
* @param qi the query information for the vector, or {@code null} only if
* the kernel in use does not support acceleration.
*/
public void mutableAdd(double c, Vec x_t, final List<Double> qi)
{
if(c == 0)
return;
normGood = false;
double y_t = c;
final double k_tt = k.eval(0, 0, Arrays.asList(x_t), qi);
if(budgetStrategy == BudgetStrategy.PROJECTION)
{
if(K == null)//first point to be added
{
KExpanded = new DenseMatrix(16, 16);
K = new SubMatrix(KExpanded, 0, 0, 1, 1);
K.set(0, 0, k_tt);
InvKExpanded = new DenseMatrix(16, 16);
InvK = new SubMatrix(InvKExpanded, 0, 0, 1, 1);
InvK.set(0, 0, 1/k_tt);
alpha.add(y_t);
vecs.add(x_t);
if(kernelAccel != null)
kernelAccel.addAll(qi);
return;
}
//Normal case
DenseVector kxt = new DenseVector(K.rows());
for (int i = 0; i < kxt.length(); i++)
kxt.set(i, k.eval(i, x_t, qi, vecs, kernelAccel));
//ALD test
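            //(approximate linear dependence: delta_t is the squared error of projecting x_t onto the span of the current basis vectors)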
final Vec alphas_t = InvK.multiply(kxt);
final double delta_t = k_tt-alphas_t.dot(kxt);
final int size = K.rows();
if(delta_t > errorTolerance && size < maxBudget)//add to the dictionary
{
vecs.add(x_t);
if(kernelAccel != null)
kernelAccel.addAll(qi);
if(size == KExpanded.rows())//we need to grow first
{
KExpanded.changeSize(size*2, size*2);
InvKExpanded.changeSize(size*2, size*2);
}
Matrix.OuterProductUpdate(InvK, alphas_t, alphas_t, 1/delta_t);
K = new SubMatrix(KExpanded, 0, 0, size+1, size+1);
InvK = new SubMatrix(InvKExpanded, 0, 0, size+1, size+1);
//update bottom row and side columns
for(int i = 0; i < size; i++)
{
K.set(size, i, kxt.get(i));
K.set(i, size, kxt.get(i));
InvK.set(size, i, -alphas_t.get(i)/delta_t);
InvK.set(i, size, -alphas_t.get(i)/delta_t);
}
//update bottom right corner
K.set(size, size, k_tt);
InvK.set(size, size, 1/delta_t);
alpha.add(y_t);
}
else//project onto dictionary
{
Vec alphaVec = alpha.getVecView();
alphaVec.mutableAdd(y_t, alphas_t);
normGood = false;
}
}
else if(budgetStrategy == BudgetStrategy.MERGE_RBF)
{
normGood = false;
addPoint(x_t, qi, y_t);
if(vecs.size() > maxBudget)
{
/*
* we use the same approximation method as in projection
                 * (Section 4.2) by fixing m as the SV with the smallest value
* of || α_m ||^2
*/
int m = 0;
double alpha_m = abs(alpha.get(m));
for(int i = 1; i < alpha.size(); i++)
if(abs(alpha.getD(i)) < abs(alpha_m))
{
alpha_m = alpha.getD(i);
m = i;
}
double minLoss = Double.POSITIVE_INFINITY;
int n = -1;
double n_h = 0;
double n_alpha_z = 0;
double tol = 1e-3;
while (n == -1)
{
for (int i = 0; i < alpha.size(); i++)
{
if (i == m)
continue;
double a_m = alpha_m, a_n = alpha.getD(i);
double normalize = a_m+a_n;
                    if (abs(normalize) < tol)//avoid alphas that nearly cancel out
continue;
final double k_mn = k.eval(i, m, vecs, kernelAccel);
double h = getH(k_mn, a_m/normalize, a_n/normalize);
/*
* we can get k(m, z) without forming z when using RBF
*
                     * exp(-||m - z||^2) = exp(-||m - (h m + (1-h) n)||^2)
                     *                   = exp(-||m - n||^2 (1-h)^2) = k(m, n)^((1-h)^2)
                     *
                     * and likewise k(n, z) = k(m, n)^(h^2), since 0 < h < 1
*/
double k_mz = pow(k_mn, (1 - h) * (1 - h));
double k_nz = pow(k_mn, h * h);
//TODO should we fall back to forming z if we use a non RBF kernel?
/*
                     * Determine the best candidate by the smallest change in norm, via a 2x2
                     * matrix for the original alphas and alpha_z on its own
*/
double alpha_z = a_m * k_mz + a_n * k_nz;
double loss = a_m * a_m + a_n * a_n
+ 2 * k_mn * a_m * a_n
- alpha_z*alpha_z;
if (loss < minLoss)
{
minLoss = loss;
n = i;
n_h = h;
n_alpha_z = alpha_z;
}
}
tol /= 10;
}
Vec n_z = vecs.get(m).multiply(n_h);
n_z.mutableAdd(1-n_h, vecs.get(n));
final List<Double> nz_qi = k.getQueryInfo(n_z);
finalMergeStep(m, n, n_z, nz_qi, n_alpha_z, true);
}
}
else if(budgetStrategy == BudgetStrategy.STOP)
{
normGood = false;
if(getBasisSize() < maxBudget)
addPoint(x_t, qi, y_t);
}
else if(budgetStrategy == BudgetStrategy.RANDOM)
{
normGood = false;
if(getBasisSize() >= maxBudget)
{
Random rand = RandomUtil.getRandom();//TODO should probably move this out
int toRemove = rand.nextInt(vecs.size());
removeIndex(toRemove);
}
addPoint(x_t, qi, y_t);
}
else
throw new RuntimeException("BUG: report me!");
}
/**
* Adds a point to the set
* @param x_t the value to add
* @param qi the query information for the value
* @param y_t the constant value to add
*/
private void addPoint(Vec x_t, final List<Double> qi, double y_t)
{
vecs.add(x_t);
if (kernelAccel != null)
kernelAccel.addAll(qi);
alpha.add(y_t);
}
/**
* Performs the last merging step removing the old vecs and adding the new
* merged one
* @param m the first of the original index to remove
* @param n the second of the original index to remove
* @param n_z the merged vec to replace them with
* @param nz_qi the query info for the new vec
* @param n_alpha_z the alpha value for the new merged vec
*/
protected void finalMergeStep(int m, int n, Vec n_z, final List<Double> nz_qi, double n_alpha_z, boolean alterVecs)
{
int smallIndx = min(m, n);
int largeIndx = max(m, n);
alpha.remove(largeIndx);
alpha.remove(smallIndx);
if(alterVecs)
{
vecs.remove(largeIndx);
vecs.remove(smallIndx);
kernelAccel.remove(largeIndx);
kernelAccel.remove(smallIndx);
vecs.add(n_z);
//XXX the following check was redundant
// if (kernelAccel != null)
kernelAccel.addAll(nz_qi);
}
alpha.add(n_alpha_z);
}
/**
     * Gets the value of h in [0, 1] that maximizes
     * a<sub>m</sub>k<sub>mn</sub><sup>(1-h)^2</sup> + a<sub>n</sub>k<sub>mn</sub><sup>h^2</sup>
     * for RBF merging.<br>
     * THIS METHOD IS A BOTTLE NECK, so it has some optimization hacks<br>
     * Only one of the coefficients can be negative.
* @param k_mn the shared kernel value on both halves of the equation
* @param a_m the first coefficient
* @param a_n the second coefficient
* @return the value of h that maximizes the response
*/
protected static double getH(final double k_mn, final double a_m, final double a_n)
{
if(a_m == a_n)
return 0.5;
final Function1D f = (double x) ->
{
final double h = x;
            //negative to maximize instead of minimize
/*
* We aren't solving to a super high degree of accuracy anyway,
             * so use an approximate pow. Its impact is only noticeable for
* very small budget sizes
*/
return -(a_m * FastMath.pow(k_mn, (1 - h) * (1 - h)) + a_n * FastMath.pow(k_mn, h * h));
};
/*
* Only a few iterations of golden search are done. Often the exact min
         * is very nearly 0 or 1, and that doesn't seem to really help with the
         * merging. I've gotten better generalization so far by allowing only a
         * small number of finite steps.
*/
/*
* if one is pos and the other is negative, the minimum value is going
* to be near 0 or 1
*/
if(Math.signum(a_m) != Math.signum(a_n))
if(a_m < 0)//we give a
return GoldenSearch.minimize(1e-3, 100, 0.0, 0.2, f);
else if(a_n < 0)
return GoldenSearch.minimize(1e-3, 100, 0.8, 1.0, f);
if(a_m > a_n)
return GoldenSearch.minimize(1e-3, 100, 0.5, 1.0, f);
else
return GoldenSearch.minimize(1e-3, 100, 0.0, 0.5, f);
}
/**
* Removes the vec, alpha, and kernel cache associate with the given index
* @param toRemove the index to remove
*/
protected void removeIndex(int toRemove)
{
if(kernelAccel != null)
{
int num = this.kernelAccel.size()/vecs.size();
for(int i = 0; i < num; i++)
kernelAccel.remove(toRemove);
}
alpha.remove(toRemove);
vecs.remove(toRemove);
}
/**
* Returns the number of vectors serving as the basis set
* @return the number of vectors serving as the basis set
*/
public int getBasisSize()
{
if(vecs == null)
return 0;
return vecs.size();
}
/**
* Returns the list of the raw vectors being used by the kernel points.
     * Altering these vectors will alter the same vectors used by the KernelPoint
* and will cause inconsistent results.<br>
* <br>
* The returned list may not be modified
*
     * @return the list of all the vectors in use as a basis set by this KernelPoint
*/
public List<Vec> getRawBasisVecs()
{
return Collections.unmodifiableList(vecs);
}
@Override
public KernelPoint clone()
{
return new KernelPoint(this);
}
}
| 26,243 | 34.226846 | 119 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/KernelPoints.java | package jsat.distributions.kernels;
import static java.lang.Math.abs;
import static java.lang.Math.pow;
import java.util.*;
import static jsat.distributions.kernels.KernelPoint.getH;
import jsat.linear.IndexValue;
import jsat.linear.SparseVector;
import jsat.linear.Vec;
import jsat.utils.DoubleList;
import jsat.utils.random.RandomUtil;
/**
* This class represents a list of {@link KernelPoint} objects. This is done to
* avoid excessive memory duplication that can occur when multiple KernelPoints
* are in use at the same time.
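 * A minimal usage sketch (the kernel choice, vector values, and parameter
 * values below are illustrative assumptions, not requirements):
 * <pre>{@code
 * KernelTrick k = new RBFKernel(0.5);
 * KernelPoints kps = new KernelPoints(k, 2, 0.05);
 * Vec x = new DenseVector(new double[]{1.0, 2.0, 3.0});
 * Vec coefficients = new DenseVector(new double[]{0.1, -0.2});
 * kps.mutableAdd(x, coefficients, k.getQueryInfo(x));
 * double[] dots = kps.dot(x, k.getQueryInfo(x));
 * }</pre>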
*
* @author Edward Raff
*/
public class KernelPoints
{
private KernelTrick k;
private double errorTolerance;
private KernelPoint.BudgetStrategy budgetStrategy = KernelPoint.BudgetStrategy.PROJECTION;
private int maxBudget = Integer.MAX_VALUE;
private List<KernelPoint> points;
/**
* Creates a new set of kernel points that uses one unified gram matrix for
* each KernelPoint
* @param k the kernel trick to use in which to represent a vector in the
* kernel space
* @param points the initial number of kernel points to store in this set
* @param errorTolerance the maximum error allowed for projecting a vector
* instead of adding it to the basis set
*/
public KernelPoints(KernelTrick k, int points, double errorTolerance)
{
this(k, points, errorTolerance, true);
}
/**
* Creates a new set of kernel points
* @param k the kernel trick to use in which to represent a vector in the
* kernel space
* @param points the initial number of kernel points to store in this set
* @param errorTolerance the maximum error allowed for projecting a vector
* instead of adding it to the basis set
* @param mergeGrams whether or not to merge the gram matrices of each
* KernelPoint.
*/
public KernelPoints(KernelTrick k, int points, double errorTolerance, boolean mergeGrams)
{
if(points < 1)
throw new IllegalArgumentException("Number of points must be positive, not " + points);
this.k = k;
this.errorTolerance = errorTolerance;
this.points = new ArrayList<KernelPoint>(points);
this.points.add(new KernelPoint(k, errorTolerance));
this.points.get(0).setMaxBudget(maxBudget);
this.points.get(0).setBudgetStrategy(budgetStrategy);
for(int i = 1; i < points; i++)
addNewKernelPoint();
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public KernelPoints(KernelPoints toCopy)
{
this.k = toCopy.k.clone();
this.errorTolerance = toCopy.errorTolerance;
this.points = new ArrayList<KernelPoint>(toCopy.points.size());
if(toCopy.points.get(0).getBasisSize() == 0)//special case, nothing has been added
{
for(int i = 0; i < toCopy.points.size(); i++)
this.points.add(new KernelPoint(k, errorTolerance));
}
else
{
            //clone the first point fully, then have the remaining points share
            //its basis structures while keeping their own alpha values
            KernelPoint source = toCopy.points.get(0).clone();
            this.points.add(source);
            for (int i = 1; i < toCopy.points.size(); i++)
            {
                KernelPoint toAdd = new KernelPoint(k, errorTolerance);
                standardMove(toAdd, source);
                toAdd.kernelAccel = source.kernelAccel;
                toAdd.vecs = source.vecs;
                toAdd.alpha = new DoubleList(toCopy.points.get(i).alpha);
                this.points.add(toAdd);
            }
}
}
/**
* Sets the method used for maintaining the budget of support vectors. This
* method must be called <i>before</i> any vectors are added to the
* KernelPoint. <br>
* <br>
* The budget maintenance strategy used controls the time complexity and
* memory use of the model.
* @param budgetStrategy the budget maintenance strategy
*/
public void setBudgetStrategy(KernelPoint.BudgetStrategy budgetStrategy)
{
this.budgetStrategy = budgetStrategy;
for(KernelPoint kp : points)
kp.setBudgetStrategy(budgetStrategy);
}
/**
* Returns the budget method used
* @return the budget method used
*/
public KernelPoint.BudgetStrategy getBudgetStrategy()
{
return budgetStrategy;
}
public KernelTrick getKernel()
{
return k;
}
/**
* Sets the error tolerance used for projection maintenance strategies such
* as {@link KernelPoint.BudgetStrategy#PROJECTION}
* @param errorTolerance the error tolerance in [0, 1]
*/
public void setErrorTolerance(double errorTolerance)
{
if(Double.isNaN(errorTolerance) || errorTolerance < 0 || errorTolerance > 1)
throw new IllegalArgumentException("Error tolerance must be in [0, 1], not " + errorTolerance);
this.errorTolerance = errorTolerance;
for(KernelPoint kp : points)
kp.setErrorTolerance(errorTolerance);
}
/**
* Returns the error tolerance that is used depending on the
* {@link KernelPoint.BudgetStrategy} in use
* @return the error tolerance value
*/
public double getErrorTolerance()
{
return errorTolerance;
}
/**
* Sets the maximum budget for support vectors to allow. Setting to
* {@link Integer#MAX_VALUE} is essentially an unbounded number of support
* vectors. Increasing the budget after adding the first vector is always
* allowed, but it may not be possible to reduce the number of current
* support vectors is above the desired budget.
*
* @param maxBudget the maximum number of allowed support vectors
*/
public void setMaxBudget(int maxBudget)
{
if(maxBudget < 1)
throw new IllegalArgumentException("Budget must be positive, not " + maxBudget);
this.maxBudget = maxBudget;
for(KernelPoint kp : points)
kp.setMaxBudget(maxBudget);
}
/**
* Returns the current maximum budget for support vectors
* @return the maximum budget for support vectors
*/
public int getMaxBudget()
{
return maxBudget;
}
/**
* Returns the squared 2 norm value of the {@code k}'th KernelPoint
* @param k the KernelPoint to get the norm of
* @return the squared 2 norm of the {@code k}'th KernelPoint
*/
public double getSqrdNorm(int k)
{
return points.get(k).getSqrdNorm();
}
/**
* Computes the dot product between the {@code k}'th KernelPoint and the
* given vector in the kernel space.
* @param k the index of the KernelPoint in this set to contribute to the
* dot product
* @param x the vector to contribute to the dot product
* @param qi the query information for the vector, or {@code null} only if
* the kernel in use does not support acceleration.
* @return the dot product between the {@code k}'th KernelPoint and the
* given vector
*/
public double dot(int k, Vec x, List<Double> qi)
{
return points.get(k).dot(x, qi);
}
/**
* Computes the dot product between each KernelPoint in this set and the
* given vector in the kernel space. The results are equivalent to an array
* and setting each value using
* {@link #dot(int, jsat.linear.Vec, java.util.List) } <br>
* This method should be faster than computing the dot products individual
* since it avoids redundant kernel computations
*
* @param x the vector to contribute to the dot product
* @param qi the query information for the vector, or {@code null} only if
* the kernel in use does not support acceleration.
* @return an array where the <i>i'th</i> index contains the dot product of
* the <i>i'th</i> KernelPoint and the given vector
*/
public double[] dot(Vec x, List<Double> qi)
{
double[] dots = new double[points.size()];
final List<Vec> vecs = points.get(0).vecs;
final List<Double> cache = points.get(0).kernelAccel;
for(int i = 0; i < vecs.size(); i++)
{
double k_ix = k.eval(i, x, qi, vecs, cache);
for(int j = 0; j < points.size(); j++)
{
double alpha = points.get(j).alpha.getD(i);
if(alpha != 0)
dots[j] += k_ix*alpha;
}
}
return dots;
}
/**
* Computes the dot product between the {@code k}'th KernelPoint and the
* given KernelPoint
* @param k the index of the KernelPoint in this set to contribute to the
* dot product
* @param x the other KernelPoint to contribute to the dot product
* @return the dot product between the {@code k}'th KernelPoint and the
* given KernelPoint
*/
public double dot(int k, KernelPoint x)
{
return points.get(k).dot(x);
}
/**
* Computes the dot product between the {@code k}'th KernelPoint and the
* {@code j}'th KernelPoint in the given set of points.
* @param k the index of the KernelPoint in this set to contribute to the
* dot product
* @param X the other set of KernelPoints
* @param j the index of the KernelPoint in the given set to contribute to
* the dot product
* @return the dot product between the {@code k}'th KernelPoint and the
* {@code j}'th KernelPoint in the given set
*/
public double dot(int k, KernelPoints X, int j)
{
return points.get(k).dot(X.points.get(j));
}
/**
* Computes the Euclidean distance in the kernel space between the
* {@code k}'th KernelPoint and the given vector
* @param k the index of the KernelPoint in this set to contribute to the
* dot product
* @param x the point to get the Euclidean distance to
* @param qi the query information for the vector, or {@code null} only if
* the kernel in use does not support acceleration.
* @return the Euclidean distance between the {@code k}'th KernelPoint and
* {@code x} in the kernel space
*/
public double dist(int k, Vec x, List<Double> qi)
{
return points.get(k).dist(x, qi);
}
/**
* Computes the Euclidean distance in the kernel space between the
* {@code k}'th KernelPoint and the given KernelPoint
* @param k the index of the KernelPoint in this set to contribute to the
* dot product
* @param x the kernel point to get the Euclidean distance to
* @return the Euclidean distance between the {@code k}'th KernelPoint and
* {@code x} in the kernel space
*/
public double dist(int k, KernelPoint x)
{
return points.get(k).dist(x);
}
/**
* Computes the Euclidean distance in the kernel space between the
* {@code k}'th KernelPoint and the {@code j}'th KernelPoint in the given
* set
*
* @param k the index of the KernelPoint in this set to contribute to the
* dot product
* @param X the other set of kernel points to obtain the target KernelPoint
* @param j the index of the KernelPoint in the given set to contribute to
* the dot product
* @return the Euclidean distance between the {@code k}'th KernelPoint and
* the {@code j}'th KernelPoint in the other set
*/
public double dist(int k, KernelPoints X, int j)
{
return points.get(k).dist(X.points.get(j));
}
/**
* Alters the {@code k}'th KernelPoint by multiplying it with a constant
* value
* @param k the index of the KernelPoint to modify
* @param c the constant to multiply the KernelPoint by
*/
public void mutableMultiply(int k, double c)
{
points.get(k).mutableMultiply(c);
}
/**
* Alters all the KernelPoint objects contained in this set by the same
* constant value
* @param c the constant to multiply the KernelPoints by
*/
public void mutableMultiply(double c)
{
for(KernelPoint kp : points)
kp.mutableMultiply(c);
}
/**
* Alters ones of the KernelPoint objects by adding / subtracting a vector
* from it
* @param k the index of the KernelPoint to use
* @param c the constant to multiply the vector being added by
* @param x_t the vector to add to the kernel point
* @param qi the query information for the vector, or {@code null} only if
* the kernel in use does not support acceleration.
*/
public void mutableAdd(int k, double c, Vec x_t, final List<Double> qi)
    {
        //a minimal sketch: delegate to the multi-point update with a single
        //non-zero coefficient so the shared basis stays consistent across all
        //KernelPoints. The use of SparseVector here is an assumption.
        SparseVector cs = new SparseVector(points.size());
        cs.set(k, c);
        mutableAdd(x_t, cs, qi);
    }
/**
* Alters some of the KernelPoints by adding / subtracting a vector from it
* @param x_t the vector to add to the kernel point
* @param cs the array with the constant multiplies. Each non zero in
* {@code cs} is a constant to update one of the vectors by. The vector
* updated is the one corresponding to the index of the non zero value
* @param qi the query information for the vector, or {@code null} only if
* the kernel in use does not support acceleration.
*/
public void mutableAdd(Vec x_t, Vec cs, final List<Double> qi)
{
int origSize = getBasisSize();
if(cs.nnz() == 0)
return;
if(budgetStrategy == KernelPoint.BudgetStrategy.PROJECTION)
{
for(IndexValue iv : cs)
{
int k = iv.getIndex();
KernelPoint kp_k = points.get(k);
double c = iv.getValue();
                if(kp_k.getBasisSize() == 0)//Special case, initialize the shared structures
{
kp_k.mutableAdd(c, x_t, qi);
                    //That initializes the structure, now we need to make the other points reference the same objects
for(int i = 0; i < points.size(); i++)
{
if(i == k)
continue;
KernelPoint kp_i = points.get(i);
standardMove(kp_i, kp_k);
//Only done one time since structures are mutable
kp_i.kernelAccel = kp_k.kernelAccel;
kp_i.vecs = kp_k.vecs;
//and then everyone gets their own private alphas added too
kp_i.alpha = new DoubleList(16);
kp_i.alpha.add(0.0);
}
}
else//standard case
{
kp_k.mutableAdd(c, x_t, qi);
if(origSize != kp_k.getBasisSize())//update kernels & add alpha
{
for(int i = 0; i < points.size(); i++)
if(i != k)
{
KernelPoint kp_i = points.get(i);
standardMove(kp_i, kp_k);
kp_i.alpha.add(0.0);
}
}
}
origSize = getBasisSize();//may have changed, but only once
}
}
else if (budgetStrategy == KernelPoint.BudgetStrategy.MERGE_RBF)
{
Iterator<IndexValue> cIter = cs.getNonZeroIterator();
if (getBasisSize() < maxBudget)
{
IndexValue firstIndx = cIter.next();
KernelPoint kp_k = points.get(firstIndx.getIndex());
kp_k.mutableAdd(firstIndx.getValue(), x_t, qi);
//fill in the non zeros
while (cIter.hasNext())
{
IndexValue iv = cIter.next();
points.get(iv.getIndex()).alpha.add(iv.getValue());
}
addMissingZeros();
}
else//we are going to exceed the budget
{
KernelPoint kp_k = points.get(0);
                //insert the new vector before merging
kp_k.vecs.add(x_t);
if (kp_k.kernelAccel != null)
kp_k.kernelAccel.addAll(qi);
for (IndexValue iv : cs)
points.get(iv.getIndex()).alpha.add(iv.getValue());
addMissingZeros();
//now go through and merge
/*
* we use the same approximation method as in projection
                 * (Section 4.2) by fixing m as the SV with the smallest value
* of || α_m ||^2
*/
int m = 0;
double alpha_m = 0;
for (KernelPoint kp : points)
alpha_m += pow(kp.alpha.getD(m), 2);
for (int i = 1; i < kp_k.alpha.size(); i++)
{
double tmp = 0;
for (KernelPoint kp : points)
tmp += pow(kp.alpha.getD(i), 2);
if (tmp < alpha_m)
{
alpha_m = tmp;
m = i;
}
}
double minLoss = Double.POSITIVE_INFINITY;
int n = -1;
double n_h = 0;
double tol = 1e-3;
double n_k_mz = 0;
double n_k_nz = 0;
while (n == -1)
{
for (int i = 0; i < kp_k.alpha.size(); i++)
{
if (i == m)
continue;
double a_m = 0, a_n = 0;
for (KernelPoint kp : points)
{
double a1 = kp.alpha.getD(m);
double a2 = kp.alpha.getD(i);
double normalize = a1 + a2;
if (normalize < 1e-7)
continue;
a_m += a1 / normalize;
a_n += a2 / normalize;
}
                        if (abs(a_m + a_n) < tol)//avoid alphas that nearly cancel out
break;
double k_mn = this.k.eval(i, m, kp_k.vecs, kp_k.kernelAccel);
double h = getH(k_mn, a_m, a_n);
/*
* we can get k(m, z) without forming z when using RBF
*
* exp(-(m-z)^2) = exp(-(m- (h m+(1-h) n))^2 ) =
                         * exp(-(x-y)^2 (h-1)^2) = exp(-(x-y)^2)^((h-1)^2)
                         *
                         * and since 0 < h < 1, (h-1)^2 = (1-h)^2
*/
double k_mz = pow(k_mn, (1 - h) * (1 - h));
double k_nz = pow(k_mn, h * h);
//TODO should we fall back to forming z if we use a non RBF kernel?
double loss = 0;
/*
                         * Determine the best by the smallest change in norm, 2x2
                         * matrix for the original alphas and alpha_z on its own
*/
for (KernelPoint kp : points)
{
double aml = kp.alpha.getD(m);
double anl = kp.alpha.getD(i);
double alpha_z = aml * k_mz + anl * k_nz;
loss += aml * aml + anl * anl
+ 2 * k_mn * aml * anl
- alpha_z * alpha_z;
}
if (loss < minLoss)
{
minLoss = loss;
n = i;
n_h = h;
n_k_mz = k_mz;
n_k_nz = k_nz;
}
}
tol /= 10;
}
Vec n_z = kp_k.vecs.get(m).multiply(n_h);
n_z.mutableAdd(1 - n_h, kp_k.vecs.get(n));
final List<Double> nz_qi = this.k.getQueryInfo(n_z);
for (int z = 0; z < points.size(); z++)
{
KernelPoint kp = points.get(z);
double aml = kp.alpha.getD(m);
double anl = kp.alpha.getD(n);
double alpha_z = aml * n_k_mz + anl * n_k_nz;
kp.finalMergeStep(m, n, n_z, nz_qi, alpha_z, z == 0);
}
}
}
else if (budgetStrategy == KernelPoint.BudgetStrategy.STOP)
{
if(getBasisSize() < maxBudget)
{
this.points.get(0).vecs.add(x_t);
if(this.points.get(0).kernelAccel != null)
this.points.get(0).kernelAccel.addAll(qi);
for(IndexValue iv : cs)
this.points.get(iv.getIndex()).alpha.add(iv.getValue());
addMissingZeros();
}
}
else if(budgetStrategy == KernelPoint.BudgetStrategy.RANDOM)
{
if(getBasisSize() >= maxBudget)
{
int toRemove = RandomUtil.getRandom().nextInt(getBasisSize());
if (getBasisSize() == maxBudget)
this.points.get(0).removeIndex(toRemove);//now remove alpha from others
for (int i = 1; i < this.points.size(); i++)
this.points.get(i).removeIndex(toRemove);
}
//now add the point
this.points.get(0).vecs.add(x_t);
if (this.points.get(0).kernelAccel != null)
this.points.get(0).kernelAccel.addAll(qi);
for (IndexValue iv : cs)
this.points.get(iv.getIndex()).alpha.add(iv.getValue());
addMissingZeros();
}
else
throw new RuntimeException("BUG: Report Me!");
}
/**
* Adds a new Kernel Point to the internal list this object represents. The
* new Kernel Point will be equivalent to creating a new KernelPoint
* directly.
*/
public void addNewKernelPoint()
{
KernelPoint source = points.get(0);
KernelPoint toAdd = new KernelPoint(k, errorTolerance);
toAdd.setMaxBudget(maxBudget);
toAdd.setBudgetStrategy(budgetStrategy);
standardMove(toAdd, source);
toAdd.kernelAccel = source.kernelAccel;
toAdd.vecs = source.vecs;
toAdd.alpha = new DoubleList(source.alpha.size());
for (int i = 0; i < source.alpha.size(); i++)
toAdd.alpha.add(0.0);
points.add(toAdd);
}
/**
* Updates the gram matrix storage of the destination to point at the exact
* same objects as the ones from the source.
* @param destination the destination object
* @param source the source object
*/
private void standardMove(KernelPoint destination, KernelPoint source)
{
destination.InvK = source.InvK;
destination.InvKExpanded = source.InvKExpanded;
destination.K = source.K;
destination.KExpanded = source.KExpanded;
}
/**
* Returns the number of basis vectors in use.
* If a vector has been added to more than one
* Kernel Point it may get double counted (or more), so the value returned
* may not be reasonable in that case.
* @return the number of basis vectors in use
*/
public int getBasisSize()
{
return this.points.get(0).getBasisSize();
}
/**
* Returns a list of the raw vectors being used by the kernel points.
     * Altering these vectors will alter the same vectors used by these objects
* and will cause inconsistent results.
*
* @return the list of raw basis vectors used by the Kernel points
*/
public List<Vec> getRawBasisVecs()
{
List<Vec> vecs = new ArrayList<Vec>(getBasisSize());
vecs.addAll(this.points.get(0).vecs);
return vecs;
}
/**
* Returns the number of KernelPoints stored in this set
* @return the number of KernelPoints stored in this set
*/
public int size()
{
return points.size();
}
@Override
public KernelPoints clone()
{
return new KernelPoints(this);
}
/**
* Adds zeros to all alpha vecs that are not of the same length as the
* vec list
*/
private void addMissingZeros()
{
        //go back and add 0s for the ones we missed
for (int i = 0; i < points.size(); i++)
while(points.get(i).alpha.size() < this.points.get(0).vecs.size())
points.get(i).alpha.add(0.0);
}
}
| 24,864 | 36.503771 | 107 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/KernelTrick.java |
package jsat.distributions.kernels;
import java.io.Serializable;
import java.util.List;
import jsat.linear.Vec;
import jsat.parameters.Parameterized;
/**
* The KernelTrick is a method can can be used to alter an algorithm to do its
* calculations in a projected feature space, without explicitly forming the
* features. If an algorithm uses only dot products, the Kernel trick can be
* used in place of these dot products, and computes the inner product in a
* different feature space.
* <br><br>
 * All KernelTrick objects are {@link Parameterized parameterized} so that the
* values of the kernel can be exposed by the algorithm that makes use of these
* parameters. To avoid conflicts in parameter names, the parameters of a
* KernelTrick should be of the form:<br>
* < SimpleClassName >_< Variable Name >
*
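 * A short sketch of the cache-accelerated evaluation pattern (the kernel and
 * vector values are illustrative assumptions):
 * <pre>{@code
 * KernelTrick k = new RBFKernel(1.0);
 * Vec a = new DenseVector(new double[]{0.0, 1.0});
 * Vec b = new DenseVector(new double[]{1.0, 0.0});
 * List<Vec> data = Arrays.asList(a, b);
 * List<Double> cache = k.getAccelerationCache(data);
 * double k_ab = k.eval(0, 1, data, cache); //same value as k.eval(a, b)
 * }</pre>
 *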
* @author Edward Raff
*/
public interface KernelTrick extends Parameterized, Cloneable, Serializable
{
/**
* Evaluate this kernel function for the two given vectors.
* @param a the first vector
     * @param b the second vector
* @return the evaluation
*/
public double eval(Vec a, Vec b);
/**
* A descriptive name for the type of KernelFunction
* @return a descriptive name for the type of KernelFunction
*/
@Override
public String toString();
public KernelTrick clone();
//Cache related
/**
* Indicates if this kernel supports building an acceleration cache
* using the {@link #getAccelerationCache(List) } and associated
* cache accelerated methods. By default this method will return
* {@code false}. If {@code true}, then a cache can be obtained from this
* matrix and used in conjunction with {@link #eval(int, Vec, List, List, List) }
* and {@link #eval(int, int, List, List) } to perform kernel products.
* @return {@code true} if cache acceleration is supported for this kernel,
* {@code false} otherwise.
*/
public boolean supportsAcceleration();
/**
* Creates a new list cache values from a given list of training set
* vectors. If this kernel does not support acceleration, {@code null} will
* be returned.
*
* @param trainingSet the list of training set vectors
* @return a list of cache values that may be used by this kernel
*/
public List<Double> getAccelerationCache(List<? extends Vec> trainingSet);
/**
     * Pre computes query information that would have been generated if the query
* was a member of the original list of vectors when calling
* {@link #getAccelerationCache(java.util.List) } . This can then be used if
* a large number of kernel computations are going to be done against
* points in the original set for a point that is outside the original space.
* <br><br>
* If this kernel does not support acceleration, {@code null} will be
* returned.
*
* @param q the query point to generate cache information for
* @return the cache information for the query point
*/
public List<Double> getQueryInfo(Vec q);
/**
* Appends the new cache values for the given vector to the list of cache
* values. This method is present for online style kernel learning
* algorithms, where the set of vectors is not known in advance. When a
* vector is added to the set of kernel vectors, its cache values can be
* added using this method. <br><br>
     * The results of calling this sequentially on a list of vectors starting
     * with an empty double list are equivalent to getting the results from
* calling {@link #getAccelerationCache(java.util.List) }
* <br><br>
* If this kernel does not support acceleration, this method call will
* function as a nop.
*
* @param newVec the new vector to add to the cache values
* @param cache the original list of cache values to add to
*/
public void addToCache(Vec newVec, List<Double> cache);
/**
     * Computes the kernel product between one vector in the original list of
     * vectors and another vector not from the original list, for which query
     * information was generated by {@link #getQueryInfo(jsat.linear.Vec) }.
* <br> If the cache input is {@code null}, then
* {@link #eval(jsat.linear.Vec, jsat.linear.Vec) } will be called directly.
* @param a the index of the vector in the cache
* @param b the other vector
* @param qi the query information about b
* @param vecs the list of vectors used to build the cache
* @param cache the cache associated with the given list of vectors
* @return the kernel product of the two vectors
*/
public double eval(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache);
/**
* Produces the correct kernel evaluation given the training set and the
* cache generated by {@link #getAccelerationCache(List) }. The training
* vectors should be in the same order.
*
* @param a the index of the first training vector
* @param b the index of the second training vector
* @param trainingSet the list of training set vectors
* @param cache the double list of cache values generated by this kernel
* for the given training set
* @return the same kernel evaluation result as
* {@link #eval(jsat.linear.Vec, jsat.linear.Vec) }
*/
public double eval(int a, int b, List<? extends Vec> trainingSet, List<Double> cache);
/**
* Performs an efficient summation of kernel products of the form <br>
* <big>∑</big> α<sub>i</sub> k(x<sub>i</sub>, y) <br>
* where <i>x</i> are the final set of vectors, and <i>α</i> the
* associated scalar multipliers
*
* @param finalSet the final set of vectors
* @param cache the cache associated with the final set of vectors
* @param alpha the coefficients associated with each vector
* @param y the vector to perform the summed kernel products against
* @param start the starting index (inclusive) to sum from
* @param end the ending index (exclusive) to sum from
* @return the sum of the multiplied kernel products
*/
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, int start, int end);
/**
* Performs an efficient summation of kernel products of the form <br>
* <big>∑</big> α<sub>i</sub> k(x<sub>i</sub>, y) <br>
* where <i>x</i> are the final set of vectors, and <i>α</i> the
* associated scalar multipliers
*
* @param finalSet the final set of vectors
* @param cache the cache associated with the final set of vectors
* @param alpha the coefficients associated with each vector
* @param y the vector to perform the summed kernel products against
* @param qi the query information about y
* @param start the starting index (inclusive) to sum from
* @param end the ending index (exclusive) to sum from
* @return the sum of the multiplied kernel products
*/
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, List<Double> qi, int start, int end);
/**
* This method indicates if a kernel is a normalized kernel or not. A
* normalized kernel is one in which k(x,x) = 1 for the same object, and no
* value greater than 1 can be returned.
*
* @return {@code true} if this is a normalized kernel. {@code false}
* otherwise.
*/
public boolean normalized();
}
| 7,675 | 43.369942 | 136 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/LinearKernel.java |
package jsat.distributions.kernels;
import java.util.Arrays;
import java.util.List;
import jsat.linear.Vec;
import jsat.parameters.DoubleParameter;
import jsat.parameters.Parameter;
/**
* Provides a linear kernel function, which computes the normal dot product.
* k(x,y) = x.y + c
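 * For example, with c = 1: k([1, 2], [3, 4]) = 1*3 + 2*4 + 1 = 12.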
*
* @author Edward Raff
*/
public class LinearKernel extends BaseKernelTrick
{
private static final long serialVersionUID = -1870181048970135367L;
private double c;
/**
* Creates a new Linear Kernel that computes the dot product and offsets it by a specified value
* @param c the positive bias term for the dot product
*/
public LinearKernel(double c)
{
this.c = c;
}
/**
* Creates a new Linear Kernel with an added bias term of 1
*/
public LinearKernel()
{
this(1);
}
/**
* The positive bias term added to the result of the dot product
* @param c the added product term
*/
public void setC(double c)
{
if(c < 0 || Double.isInfinite(c) || Double.isNaN(c))
throw new IllegalArgumentException("C must be a positive constant, not " + c);
this.c = c;
}
/**
* Returns the positive additive term
* @return the positive additive term
*/
public double getC()
{
return c;
}
@Override
public double eval(Vec a, Vec b)
{
return a.dot(b) + c;
}
@Override
public String toString()
{
return "Linear Kernel (c=" + c + ")";
}
@Override
public LinearKernel clone()
{
return new LinearKernel(c);
}
}
| 1,639 | 20.025641 | 100 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/NormalizedKernel.java | /*
* Copyright (C) 2016 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions.kernels;
import java.util.Arrays;
import java.util.List;
import jsat.linear.Vec;
import jsat.parameters.Parameter;
/**
* This provides a wrapper kernel that produces a normalized kernel trick from
* any input kernel trick. A normalized kernel has a maximum output of 1 when
* two inputs are the same.
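 * The normalized evaluation is k(a, b) / sqrt(k(a, a) * k(b, b)), with a value
 * of 0 returned when either self-evaluation is 0.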
*
* @author Edward Raff
*/
public class NormalizedKernel implements KernelTrick
{
private KernelTrick k;
public NormalizedKernel(KernelTrick source_kernel)
{
this.k = source_kernel;
}
@Override
public NormalizedKernel clone()
{
return new NormalizedKernel(k.clone());
}
@Override
public double eval(Vec a, Vec b)
{
double aa = k.eval(a, a);
double bb = k.eval(b, b);
if(aa == 0 || bb == 0)
return 0;
else
return k.eval(a, b)/Math.sqrt(aa*bb);
}
@Override
public List<Parameter> getParameters()
{
return k.getParameters();
}
@Override
public Parameter getParameter(String paramName)
{
return k.getParameter(paramName);
}
@Override
public boolean supportsAcceleration()
{
return k.supportsAcceleration();
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> trainingSet)
{
return k.getAccelerationCache(trainingSet);
}
@Override
public List<Double> getQueryInfo(Vec q)
{
return k.getQueryInfo(q);
}
@Override
public void addToCache(Vec newVec, List<Double> cache)
{
k.addToCache(newVec, cache);
}
@Override
public double eval(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
double aa = k.eval(a, a, vecs, cache);
double bb = k.eval(0, 0, Arrays.asList(b), qi);
if(aa == 0 || bb == 0)
return 0;
else
return k.eval(a, b, qi, vecs, cache)/Math.sqrt(aa*bb);
}
@Override
public double eval(int a, int b, List<? extends Vec> trainingSet, List<Double> cache)
{
double aa = k.eval(a, a, trainingSet, cache);
double bb = k.eval(b, b, trainingSet, cache);
if(aa == 0 || bb == 0)
return 0;
else
return k.eval(a, b, trainingSet, cache)/Math.sqrt(aa*bb);
}
@Override
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, int start, int end)
{
return evalSum(finalSet, cache, alpha, y, getQueryInfo(y), start, end);
}
@Override
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, List<Double> qi, int start, int end)
{
double sum = 0;
for(int i = start; i < end; i++)
sum += alpha[i] * eval(i, y, qi, finalSet, cache);
return sum;
}
@Override
public boolean normalized()
{
return true;
}
}
| 3,679 | 25.666667 | 135 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/PolynomialKernel.java |
package jsat.distributions.kernels;
import java.util.*;
import jsat.DataSet;
import jsat.distributions.Distribution;
import jsat.distributions.LogUniform;
import jsat.distributions.discrete.UniformDiscrete;
import jsat.linear.Vec;
import jsat.parameters.DoubleParameter;
import jsat.parameters.Parameter;
/**
* Provides a Polynomial Kernel of the form <br>
* k(x,y) = (alpha * x.y + c)^d
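 * For example, with alpha = 1, c = 1, and degree = 2: k([1, 2], [3, 4]) = (1*3 + 2*4 + 1)^2 = 144.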
* @author Edward Raff
*/
public class PolynomialKernel extends BaseKernelTrick
{
private static final long serialVersionUID = 9123109691782934745L;
private double degree;
private double alpha;
private double c;
/**
* Creates a new polynomial kernel
* @param degree the degree of the polynomial
* @param alpha the term to scale the dot product by
* @param c the additive term
*/
public PolynomialKernel(double degree, double alpha, double c)
{
this.degree = degree;
this.alpha = alpha;
this.c = c;
}
/**
* Defaults alpha = 1 and c = 1
* @param degree the degree of the polynomial
*/
public PolynomialKernel(double degree)
{
this(degree, 1, 1);
}
/**
* Sets the scaling factor for the dot product, this is equivalent to
* multiplying each value in the data set by a constant factor
* @param alpha the scaling factor
*/
public void setAlpha(double alpha)
{
if(Double.isInfinite(alpha) || Double.isNaN(alpha) || alpha == 0)
throw new IllegalArgumentException("alpha must be a real non zero value, not " + alpha);
this.alpha = alpha;
}
/**
* Sets the additive term, when set to one this is equivalent to adding a
* bias term of 1 to each vector. This is done after the scaling by
* {@link #setAlpha(double) alpha}.
* @param c the non negative additive term
*/
public void setC(double c)
{
if(c < 0 || Double.isNaN(c) || Double.isInfinite(c))
throw new IllegalArgumentException("C must be non negative, not " + c);
this.c = c;
}
/**
* Sets the degree of the polynomial
* @param d the degree of the polynomial
*/
public void setDegree(double d)
{
this.degree = d;
}
/**
* Returns the scaling parameter
* @return the scaling parameter
*/
public double getAlpha()
{
return alpha;
}
/**
* Returns the additive constant
* @return the additive constant
*/
public double getC()
{
return c;
}
/**
* Returns the degree of the polynomial
* @return the degree of the polynomial
*/
public double getDegree()
{
return degree;
}
@Override
public double eval(Vec a, Vec b)
{
return Math.pow(c+a.dot(b)*alpha, degree);
}
@Override
public String toString()
{
return "Polynomial Kernel ( degree="+degree + ", c=" + c + ", alpha=" + alpha + ")";
}
/**
* Guesses the distribution to use for the degree parameter
*
* @param d the dataset to get the guess for
* @return the guess for the degree parameter
* @see #setDegree(double)
*/
public static Distribution guessDegree(DataSet d)
{
return new UniformDiscrete(2, 9);
}
@Override
public PolynomialKernel clone()
{
return new PolynomialKernel(degree, alpha, c);
}
}
| 3,449 | 23.820144 | 100 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/PukKernel.java | package jsat.distributions.kernels;
import java.util.List;
import jsat.DataSet;
import jsat.distributions.Distribution;
import jsat.distributions.LogUniform;
import jsat.distributions.Uniform;
import jsat.linear.Vec;
import jsat.parameters.Parameterized;
/**
* The PUK kernel is an alternative to the RBF Kernel. By altering the
* {@link #setOmega(double) omega} parameter the behavior of the PUK kernel can
* be controlled. The {@link #setSigma(double) sigma} parameter works in the
* same way as the RBF Kernel.<br>
* <br>
* See: Üstün, B., Melssen, W. J.,&Buydens, L. M. C. (2006). <i>Facilitating
* the application of Support Vector Regression by using a universal Pearson VII
* function based kernel</i>. Chemometrics and Intelligent Laboratory Systems,
* 81(1), 29–40. doi:10.1016/j.chemolab.2005.09.003
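 * <br><br>
 * A minimal usage sketch (the parameter values and input vectors are
 * illustrative assumptions):
 * <pre>{@code
 * PukKernel puk = new PukKernel(1.0, 1.0); //sigma, omega
 * double sim = puk.eval(new DenseVector(new double[]{0.0, 0.0}),
 *                       new DenseVector(new double[]{1.0, 1.0}));
 * }</pre>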
*
* @author Edward Raff
*/
public class PukKernel extends BaseL2Kernel implements Parameterized
{
private static final long serialVersionUID = 8727097671803148320L;
private double sigma;
private double omega;
private double cnst;
/**
* Creates a new PUK Kernel
* @param sigma the width parameter of the kernel
* @param omega the shape parameter of the kernel
*/
public PukKernel(double sigma, double omega)
{
setSigma(sigma);
setOmega(omega);
}
/**
* Sets the omega parameter value, which controls the shape of the kernel
* @param omega the positive parameter value
*/
public void setOmega(double omega)
{
if(omega <= 0 || Double.isNaN(omega) || Double.isInfinite(omega))
throw new ArithmeticException("omega must be positive, not " + omega);
this.omega = omega;
this.cnst = Math.sqrt(Math.pow(2, 1/omega)-1);
}
public double getOmega()
{
return omega;
}
/**
* Sets the sigma parameter value, which controls the width of the kernel
* @param sigma the positive parameter value
*/
public void setSigma(double sigma)
{
if(sigma <= 0 || Double.isNaN(sigma) || Double.isInfinite(sigma))
throw new ArithmeticException("sigma must be positive, not " + sigma);
this.sigma = sigma;
}
public double getSigma()
{
return sigma;
}
@Override
public double eval(Vec a, Vec b)
{
return getVal(a.pNormDist(2.0, b));
}
@Override
public PukKernel clone()
{
return new PukKernel(sigma, omega);
}
@Override
public double eval(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
return getVal(Math.sqrt(getSqrdNorm(a, b, qi, vecs, cache)));
}
@Override
public double eval(int a, int b, List<? extends Vec> trainingSet, List<Double> cache)
{
        return getVal(Math.sqrt(getSqrdNorm(a, b, trainingSet, cache)));
}
private double getVal(double pNormDist)
{
double tmp = 2*pNormDist*cnst/sigma;
return 1/Math.pow(1+tmp*tmp, omega);
}
/**
* Guesses the distribution to use for the ω parameter
*
* @param d the dataset to get the guess for
* @return the guess for the ω parameter
* @see #setOmega(double)
*/
public static Distribution guessOmega(DataSet d)
{
return new LogUniform(0.25, 50);
}
/**
     * Guesses the distribution to use for the σ parameter
     *
     * @param d the dataset to get the guess for
     * @return the guess for the σ parameter
* @see #setSigma(double)
*/
public static Distribution guessSigma(DataSet d)
{
return RBFKernel.guessSigma(d);
}
@Override
public boolean normalized()
{
return true;
}
}
| 3,781 | 26.808824 | 99 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/RBFKernel.java |
package jsat.distributions.kernels;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.distributions.Distribution;
import jsat.distributions.Exponential;
import jsat.distributions.LogUniform;
import jsat.distributions.Uniform;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.math.OnLineStatistics;
import jsat.text.GreekLetters;
import jsat.utils.DoubleList;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
/**
* Provides a kernel for the Radial Basis Function, which is of the form
* <br>
* k(x, y) = exp(-||x-y||<sup>2</sup>/(2*σ<sup>2</sup>))
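 * For example, with σ = 1: k([0, 0], [1, 1]) = exp(-2/2) ≈ 0.368, while k(x, x) = 1 for any x.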
*
* @author Edward Raff
*/
public class RBFKernel extends BaseL2Kernel
{
private static final long serialVersionUID = -6733691081172950067L;
private double sigma;
private double sigmaSqrd2Inv;
/**
* Creates a new RBF kernel with σ = 1
*/
public RBFKernel()
{
this(1.0);
}
/**
* Creates a new RBF kernel
* @param sigma the sigma parameter
*/
public RBFKernel(double sigma)
{
setSigma(sigma);
}
@Override
public double eval(Vec a, Vec b)
{
        if(a == b)//Same reference means dist of 0, exp(0) = 1
return 1;
return Math.exp(-Math.pow(a.pNormDist(2, b),2) * sigmaSqrd2Inv);
}
@Override
public double eval(int a, int b, List<? extends Vec> trainingSet, List<Double> cache)
{
if(a == b)
return 1;
return Math.exp(-getSqrdNorm(a, b, trainingSet, cache)* sigmaSqrd2Inv);
}
@Override
public double eval(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
return Math.exp(-getSqrdNorm(a, b, qi, vecs, cache)* sigmaSqrd2Inv);
}
/**
* Sets the sigma parameter, which must be a positive value
* @param sigma the sigma value
*/
public void setSigma(double sigma)
{
if(sigma <= 0)
throw new IllegalArgumentException("Sigma must be a positive constant, not " + sigma);
this.sigma = sigma;
this.sigmaSqrd2Inv = 0.5/(sigma*sigma);
}
public double getSigma()
{
return sigma;
}
@Override
public String toString()
{
return "RBF Kernel( " + GreekLetters.sigma +" = " + sigma +")";
}
@Override
public RBFKernel clone()
{
return new RBFKernel(sigma);
}
/**
* Another common (equivalent) form of the RBF kernel is k(x, y) =
* exp(-γ||x-y||<sup>2</sup>). This method converts the σ value
* used by this class to the equivalent γ value.
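     * For example, σ = 1 corresponds to γ = 1/(2*1<sup>2</sup>) = 0.5.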
* @param sigma the value of σ
* @return the equivalent γ value.
*/
public static double sigmaToGamma(double sigma)
{
if(sigma <= 0 || Double.isNaN(sigma) || Double.isInfinite(sigma))
throw new IllegalArgumentException("sigma must be positive, not " + sigma);
return 1/(2*sigma*sigma);
}
/**
* Another common (equivalent) form of the RBF kernel is k(x, y) =
* exp(-γ||x-y||<sup>2</sup>). This method converts the γ value
     * to the equivalent σ value used by this class.
* @param gamma the value of γ
* @return the equivalent σ value
*/
public static double gammToSigma(double gamma)
{
if(gamma <= 0 || Double.isNaN(gamma) || Double.isInfinite(gamma))
throw new IllegalArgumentException("gamma must be positive, not " + gamma);
return 1/Math.sqrt(2*gamma);
}
/**
* Guess the distribution to use for the kernel width term
* {@link #setSigma(double) σ} in the RBF kernel.
*
* @param d the data set to get the guess for
* @return the guess for the σ parameter in the RBF Kernel
*/
public static Distribution guessSigma(DataSet d)
{
return GeneralRBFKernel.guessSigma(d, new EuclideanDistance());
}
@Override
public boolean normalized()
{
return true;
}
}
| 4,206 | 27.046667 | 99 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/RationalQuadraticKernel.java |
package jsat.distributions.kernels;
import java.util.List;
import jsat.DataSet;
import jsat.distributions.Distribution;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
* Provides an implementation of the Rational Quadratic Kernel, which is of the
* form: <br>
* k(x, y) = 1 - ||x-y||<sup>2</sup> / (||x-y||<sup>2</sup> + c)
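 * For example, with c = 1 and ||x-y||<sup>2</sup> = 1: k(x, y) = 1 - 1/(1 + 1) = 0.5.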
*
* @author Edward Raff
*/
public class RationalQuadraticKernel extends BaseL2Kernel
{
private static final long serialVersionUID = 6773399185851115840L;
private double c;
/**
* Creates a new RQ Kernel
* @param c the positive additive coefficient
*/
public RationalQuadraticKernel(double c)
{
this.c = c;
}
/**
* Sets the positive additive coefficient
* @param c the positive additive coefficient
*/
public void setC(double c)
{
if(c <= 0 || Double.isNaN(c) || Double.isInfinite(c))
throw new IllegalArgumentException("coefficient must be in (0, Inf), not " + c);
this.c = c;
}
/**
* Returns the positive additive coefficient
* @return the positive additive coefficient
*/
public double getC()
{
return c;
}
@Override
public double eval(Vec a, Vec b)
{
double dist = Math.pow(a.pNormDist(2, b), 2);
return 1-dist/(dist+c);
}
@Override
public double eval(int a, int b, List<? extends Vec> trainingSet, List<Double> cache)
{
double dist = getSqrdNorm(a, b, trainingSet, cache);
return 1-dist/(dist+c);
}
@Override
public double eval(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
double dist = getSqrdNorm(a, b, qi, vecs, cache);
return 1-dist/(dist+c);
}
@Override
public RationalQuadraticKernel clone()
{
return new RationalQuadraticKernel(c);
}
/**
* Guess the distribution to use for the C parameter.
*
* @param d the data set to get the guess for
* @return the guess for the C parameter
* @see #setC(double)
*/
public static Distribution guessC(DataSet d)
{
        //TODO come up with a better estimate
        return RBFKernel.guessSigma(d);//surprisingly this seems to work well
}
@Override
public boolean normalized()
{
return true;
}
}
| 2,417 | 23.673469 | 99 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/kernels/SigmoidKernel.java |
package jsat.distributions.kernels;
import java.util.*;
import jsat.DataSet;
import jsat.distributions.Distribution;
import jsat.distributions.LogUniform;
import jsat.distributions.Uniform;
import jsat.linear.Vec;
import jsat.parameters.Parameter;
/**
* Provides an implementation of the Sigmoid (Hyperbolic Tangent) Kernel, which
* is of the form:<br> k(x, y) = tanh(alpha * < x, y > +c)<br>
* Technically, this kernel is not positive definite.
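 * For example, with alpha = 1 and c = 0: k(x, x) = tanh(||x||<sup>2</sup>), so x = [1, 0] gives tanh(1) ≈ 0.762.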
*
* @author Edward Raff
*/
public class SigmoidKernel extends BaseKernelTrick
{
private static final long serialVersionUID = 8066799016611439349L;
private double alpha;
private double c;
/**
* Creates a new Sigmoid Kernel
* @param alpha the scaling factor for the dot product
* @param C the additive constant
*/
public SigmoidKernel(double alpha, double C)
{
this.alpha = alpha;
this.c = C;
}
/**
* Creates a new Sigmoid Kernel with a bias term of 1
* @param alpha the scaling factor for the dot product
*/
public SigmoidKernel(double alpha)
{
this(alpha, 1);
}
/**
* Sets the scaling factor for the dot product, this is equivalent to
* multiplying each value in the data set by a constant factor
* @param alpha the scaling factor
*/
public void setAlpha(double alpha)
{
if(Double.isInfinite(alpha) || Double.isNaN(alpha) || alpha == 0)
throw new IllegalArgumentException("alpha must be a real non zero value, not " + alpha);
this.alpha = alpha;
}
/**
* Returns the scaling parameter
* @return the scaling parameter
*/
public double getAlpha()
{
return alpha;
}
/**
* Sets the additive term, when set to one this is equivalent to adding a
* bias term of 1 to each vector. This is done after the scaling by
* {@link #setAlpha(double) alpha}.
* @param c the non negative additive term
*/
public void setC(double c)
{
if(c < 0 || Double.isNaN(c) || Double.isInfinite(c))
throw new IllegalArgumentException("C must be non negative, not " + c);
this.c = c;
}
/**
* Returns the additive constant
* @return the additive constant
*/
public double getC()
{
return c;
}
@Override
public double eval(Vec a, Vec b)
{
return Math.tanh(alpha*a.dot(b)+c);
}
/**
* Guesses a distribution for the α parameter
*
* @param d the data to get the guess for
* @return a distribution for the α parameter
*/
public static Distribution guessAlpha(DataSet d)
{
return new LogUniform(1e-12, 1e3);//from A Study on Sigmoid Kernels for SVM and the Training of non-PSD Kernels by SMO-type Methods
}
/**
     * Guesses a distribution for the c parameter
     *
     * @param d the data to get the guess for
     * @return a distribution for the c parameter
*/
public static Distribution guessC(DataSet d)
{
return new Uniform(-2.4, 2.4);//from A Study on Sigmoid Kernels for SVM and the Training of non-PSD Kernels by SMO-type Methods
}
@Override
public SigmoidKernel clone()
{
return new SigmoidKernel(alpha, c);
}
}
| 3,341 | 26.393443 | 139 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/multivariate/Dirichlet.java |
package jsat.distributions.multivariate;
import java.util.ArrayList;
import java.util.Random;
import java.util.List;
import jsat.classifiers.DataPoint;
import jsat.distributions.Gamma;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.math.Function;
import jsat.math.optimization.NelderMead;
import static java.lang.Math.*;
import static jsat.math.SpecialMath.*;
import jsat.utils.concurrent.ParallelUtils;
/**
* An implementation of the Dirichlet distribution. The Dirichlet distribution takes a vector of
* positive alphas as its argument, which also specifies the dimension of the distribution. The
* Dirichlet distribution has a non zero {@link #pdf(jsat.linear.Vec) PDF} only when the input
* vector sums to 1.0, and contains no negative or zero values.
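 * A minimal usage sketch (the alpha values and query point are illustrative
 * assumptions):
 * <pre>{@code
 * Dirichlet dir = new Dirichlet(new DenseVector(new double[]{2.0, 3.0, 5.0}));
 * double p = dir.pdf(new DenseVector(new double[]{0.2, 0.3, 0.5})); //input sums to 1.0
 * }</pre>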
*
* @author Edward Raff
*/
public class Dirichlet extends MultivariateDistributionSkeleton
{
private static final long serialVersionUID = 6229508050763067569L;
private Vec alphas;
/**
* Creates a new Dirichlet distribution.
*
* @param alphas the positive alpha values for the distribution. The length of the vector indicates the dimension
* @throws ArithmeticException if any of the alpha values are not positive
*/
public Dirichlet(Vec alphas)
{
setAlphas(alphas);
}
/**
     * Sets the alphas of the distribution. A copy is made, so altering the input does not affect the distribution.
* @param alphas the parameter values
* @throws ArithmeticException if any of the alphas are not positive numbers
*/
public void setAlphas(Vec alphas) throws ArithmeticException
{
double tmp;
for(int i = 0; i < alphas.length(); i++)
if( (tmp = alphas.get(i)) <= 0 || Double.isNaN(tmp) || Double.isInfinite(tmp))
throw new ArithmeticException("Dirichlet Distribution parameters must be positive, " + tmp + " is invalid");
this.alphas = alphas.clone();
}
/**
* Returns the backing vector that contains the alphas specifying the current distribution. Mutable operations should not be applied.
* @return the alphas that make the current distribution.
*/
public Vec getAlphas()
{
return alphas;
}
@Override
public Dirichlet clone()
{
return new Dirichlet(alphas);
}
@Override
public double logPdf(Vec x)
{
if(x.length() != alphas.length())
            throw new ArithmeticException( alphas.length() + " variable distribution can not answer a " + x.length() + " dimension variable");
double logVal = 0;
double tmp;
double sum = 0.0;
for(int i = 0; i < alphas.length(); i++)
{
tmp = x.get(i);
if(tmp <= 0)//All values must be positive to be possible
return -Double.MAX_VALUE;
sum += tmp;
logVal += log(x.get(i))*(alphas.get(i)-1.0);
}
        if(abs(sum - 1.0) > 1e-14)//Some wiggle room, but should sum to one
return -Double.MAX_VALUE;
/**
* Normalizing constant is defined by
*
* N
* =====
* | |
* | | Gamma/a \
* | | \ i/
* | |
* i = 1
* B(alpha) = ---------------
* / N \
* |===== |
* |\ |
* Gamma| > a |
* |/ i|
* |===== |
* \i = 1 /
*/
double logNormalizer = 0.0;
for(int i = 0; i < alphas.length(); i++)
logNormalizer += lnGamma(alphas.get(i));
logNormalizer -= lnGamma(alphas.sum());
return logVal - logNormalizer;
}
public double pdf(Vec x)
{
return exp(logPdf(x));
}
@Override
public <V extends Vec> boolean setUsingData(List<V> dataSet, boolean parallel)
{
Function logLike = (Vec x, boolean p) ->
{
double constantTerm = lnGamma(x.sum());
for(int i = 0; i < x.length(); i++)
constantTerm -= lnGamma(x.get(i));
double sum = ParallelUtils.run(p, dataSet.size(), (start, end)->
{
double local_sum = 0;
for(int i = start; i < end; i++)
{
Vec s = dataSet.get(i);
for(int j = 0; j < x.length(); j++)
local_sum += log(s.get(j))*(x.get(j)-1.0);
}
return local_sum;
}, (a,b)->a+b);
return -(sum+constantTerm*dataSet.size());
};
NelderMead optimize = new NelderMead();
Vec guess = new DenseVector(dataSet.get(0).length());
List<Vec> guesses = new ArrayList<>();
guesses.add(guess.add(1.0));
guesses.add(guess.add(0.1));
guesses.add(guess.add(10.0));
this.alphas = optimize.optimize(1e-10, 100, logLike, guesses, parallel);
return true;
}
@Override
public List<Vec> sample(int count, Random rand)
{
List<Vec> samples = new ArrayList<>(count);
double[][] gammaSamples = new double[alphas.length()][];
for(int i = 0; i < gammaSamples.length; i++)
{
Gamma gamma = new Gamma(alphas.get(i), 1.0);
gammaSamples[i] = gamma.sample(count, rand);
}
for(int i = 0; i < count; i++)
{
Vec sample = new DenseVector(alphas.length());
for(int j = 0; j < alphas.length(); j++)
sample.set(j, gammaSamples[j][i]);
sample.mutableDivide(sample.sum());
samples.add(sample);
}
return samples;
}
}
| 5,992 | 31.748634 | 146 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/multivariate/IndependentDistribution.java | /*
* Copyright (C) 2019 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions.multivariate;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
import jsat.distributions.ContinuousDistribution;
import jsat.distributions.Distribution;
import jsat.distributions.discrete.DiscreteDistribution;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
/**
*
* @author Edward Raff
*/
public class IndependentDistribution implements MultivariateDistribution
{
protected List<Distribution> distributions;
public IndependentDistribution(List<Distribution> distributions)
{
this.distributions = distributions;
}
public IndependentDistribution(IndependentDistribution toCopy)
{
this.distributions = toCopy.distributions.stream()
.map(Distribution::clone)
.collect(Collectors.toList());
}
@Override
public double logPdf(Vec x)
{
if(x.length() != distributions.size())
throw new ArithmeticException("Expected input of size " + distributions.size() + " not " + x.length());
double logPDF = 0;
for(int i = 0; i < x.length(); i++)
{
Distribution dist = distributions.get(i);
if(dist instanceof DiscreteDistribution)
logPDF += ((DiscreteDistribution)dist).logPmf((int) Math.round(x.get(i)));
else
logPDF += ((ContinuousDistribution) dist).logPdf(x.get(i));
}
return logPDF;
}
@Override
public <V extends Vec> boolean setUsingData(List<V> dataSet, boolean parallel)
{
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
@Override
public MultivariateDistribution clone()
{
return new IndependentDistribution(this);
}
@Override
public List<Vec> sample(int count, Random rand)
{
List<Vec> sample = new ArrayList<>();
for(int i = 0; i < count; i++)
{
sample.add(new DenseVector(distributions.size()));
}
for (int j = 0; j < distributions.size(); j++)
{
Distribution d = distributions.get(j);
double[] vals = d.sample(count, rand);
for(int i = 0; i < sample.size(); i++)
sample.get(i).set(j, vals[i]);
}
return sample;
}
}
| 3,138 | 30.39 | 135 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/multivariate/MetricKDE.java |
package jsat.distributions.multivariate;
import java.util.*;
import java.util.concurrent.ExecutorService;
import jsat.distributions.empirical.KernelDensityEstimator;
import jsat.distributions.empirical.kernelfunc.EpanechnikovKF;
import jsat.distributions.empirical.kernelfunc.KernelFunction;
import jsat.exceptions.UntrainedModelException;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.linear.distancemetrics.*;
import jsat.linear.vectorcollection.*;
import jsat.math.OnLineStatistics;
import jsat.parameters.*;
import jsat.utils.concurrent.ParallelUtils;
/**
* MetricKDE is a generalization of the {@link KernelDensityEstimator} to the multivariate case.
* A {@link KernelFunction} is used to weight the contribution of each data point, and a
* {@link DistanceMetric } is used to effectively alter the shape of the kernel. The MetricKDE uses
* one bandwidth parameter, which can be estimated using a nearest neighbor approach, or tuned by hand.
 * The bandwidth of the MetricKDE can not be estimated in the same way as the univariate case.
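 * A minimal usage sketch (this assumes the inherited setUsingData and pdf
 * methods of the multivariate distribution API; the data and query values are
 * illustrative):
 * <pre>{@code
 * Vec a = new DenseVector(new double[]{0.0, 0.0});
 * Vec b = new DenseVector(new double[]{1.0, 1.0});
 * Vec c = new DenseVector(new double[]{0.0, 1.0});
 * Vec d = new DenseVector(new double[]{1.0, 0.0});
 * MetricKDE kde = new MetricKDE(new EuclideanDistance());
 * kde.setUsingData(Arrays.asList(a, b, c, d));
 * double density = kde.pdf(new DenseVector(new double[]{0.5, 0.5}));
 * }</pre>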
*
* @author Edward Raff
*/
public class MetricKDE extends MultivariateKDE implements Parameterized
{
private static final long serialVersionUID = -2084039950938740815L;
private KernelFunction kf;
private double bandwidth;
private DistanceMetric distanceMetric;
private VectorCollection<VecPaired<Vec, Integer>> vc;
private int defaultK;
private double defaultStndDev;
private static final VectorCollection<VecPaired<Vec, Integer>> defaultVC = new DefaultVectorCollection<>();
/**
* When estimating the bandwidth, the distances of the k'th nearest
* neighbors are used to perform the estimate. The default value of
* this k is {@value #DEFAULT_K}
*/
public static final int DEFAULT_K = 3;
/**
* When estimating the bandwidth, the distances of the k'th nearest
* neighbors are used to perform the estimate. The default number of
* standard deviations from the mean to add to the bandwidth estimate
* is {@value #DEFAULT_STND_DEV}
*/
public static final double DEFAULT_STND_DEV = 2.0;
/**
* When estimating the bandwidth, the distances of the k'th nearest
* neighbors are used to perform the estimate. The weight of each neighbor
* is controlled by the kernel function.
*/
public static final KernelFunction DEFAULT_KF = EpanechnikovKF.getInstance();
/**
* Creates a new KDE object that still needs a data set to model the distribution of
*/
public MetricKDE()
{
this(DEFAULT_KF, new EuclideanDistance(), defaultVC);
}
/**
* Creates a new KDE object that still needs a data set to model the distribution of
*
* @param distanceMetric the distance metric to use
*/
public MetricKDE(DistanceMetric distanceMetric)
{
this(DEFAULT_KF, distanceMetric, defaultVC);
}
/**
* Creates a new KDE object that still needs a data set to model the distribution of
* @param distanceMetric the distance metric to use
     * @param vc the vector collection to use for storing and querying the data points
*/
public MetricKDE(DistanceMetric distanceMetric, VectorCollection<VecPaired<Vec, Integer>> vc)
{
this(DEFAULT_KF, distanceMetric, vc);
}
public MetricKDE(KernelFunction kf, DistanceMetric distanceMetric)
{
this(kf, distanceMetric, new DefaultVectorCollection<VecPaired<Vec, Integer>>());
}
/**
* Creates a new KDE object that still needs a data set to model the distribution of
* @param kf the kernel function to use
* @param distanceMetric the distance metric to use
     * @param vc the vector collection to use for storing and querying the data points
*/
public MetricKDE(KernelFunction kf, DistanceMetric distanceMetric, VectorCollection<VecPaired<Vec, Integer>> vc)
{
this(kf, distanceMetric, vc, DEFAULT_K, DEFAULT_STND_DEV);
}
/**
* Creates a new KDE object that still needs a data set to model the distribution of
* @param kf the kernel function to use
* @param distanceMetric the distance metric to use
     * @param vc the vector collection to use for storing and querying the data points
* @param defaultK the default neighbor to use when estimating the bandwidth
* @param defaultStndDev the default multiple of standard deviations to add when estimating the bandwidth
*/
public MetricKDE(KernelFunction kf, DistanceMetric distanceMetric, VectorCollection<VecPaired<Vec, Integer>> vc, int defaultK, double defaultStndDev)
{
setKernelFunction(kf);
this.distanceMetric = distanceMetric;
this.vc = vc;
setDefaultK(defaultK);
setDefaultStndDev(defaultStndDev);
}
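    /*
     * Usage sketch (illustrative only, not part of the original class). It
     * shows the usual fit-then-query pattern; the list "data" below is a
     * hypothetical List<Vec> of training vectors.
     *
     *   MetricKDE kde = new MetricKDE(new EuclideanDistance());
     *   kde.setUsingData(data, 3);      //bandwidth from the 3rd nearest neighbor distances
     *   double density = kde.pdf(data.get(0));
     *   kde.scaleBandwidth(2.0);        //optionally smooth the estimate further
     */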
/**
* Sets the bandwidth used to estimate the density of the underlying distribution. Too small a bandwidth
* results in high variance, while too large causes high bias.
*
* @param bandwidth the bandwidth to use for estimation
* @throws ArithmeticException if the bandwidth given is not a positive number
*/
public void setBandwith(double bandwidth)
{
if(bandwidth <= 0 || Double.isNaN(bandwidth) || Double.isInfinite(bandwidth))
throw new ArithmeticException("Invalid bandwith given, bandwith must be a positive number, not " + bandwidth);
this.bandwidth = bandwidth;
}
/**
* Returns the current bandwidth used
* @return the current bandwidth
*/
public double getBandwith()
{
return bandwidth;
}
/**
     * When estimating the bandwidth, the mean distance to the k'th nearest neighbor of each data point
     * is used. This value controls the default value of k used when it is not specified.
     * 
     * @param defaultK the default k'th nearest neighbor to use when estimating the bandwidth
*/
public void setDefaultK(int defaultK)
{
if(defaultK <= 0)
throw new ArithmeticException("At least one neighbor must be taken into acount, " + defaultK + " is invalid");
this.defaultK = defaultK;
}
/**
* Returns the default value of the k'th nearest neighbor to use when not specified.
* @return the default neighbor used to estimate the bandwidth when not specified
*/
public int getDefaultK()
{
return defaultK;
}
/**
     * When estimating the bandwidth, the mean of the neighbor distances is used, and a multiple of 
     * the standard deviations is added. This controls the multiplier value used when the bandwidth is not specified. 
     * The multiplier must be a positive value. 
     * 
     * @param defaultStndDev the multiple of the standard deviation to add to the bandwidth estimate
*/
public void setDefaultStndDev(double defaultStndDev)
{
if(Double.isInfinite(defaultStndDev) || Double.isNaN(defaultStndDev) || defaultStndDev <= 0)
throw new ArithmeticException("The number of standard deviations to remove must bea postive number, not " + defaultStndDev);
this.defaultStndDev = defaultStndDev;
}
/**
* Returns the multiple of the standard deviations that is added to the bandwidth estimate
* @return the multiple of the standard deviations that is added to the bandwidth estimate
*/
public double getDefaultStndDev()
{
return defaultStndDev;
}
/**
* Returns the distance metric that is used for density estimation
* @return the metric used
*/
public DistanceMetric getDistanceMetric()
{
return distanceMetric;
}
/**
* Sets the distance metric that is used for density estimation
* @param distanceMetric the metric to use
*/
public void setDistanceMetric(DistanceMetric distanceMetric)
{
this.distanceMetric = distanceMetric;
}
@Override
public MetricKDE clone()
{
MetricKDE clone = new MetricKDE(kf, distanceMetric.clone(), vc.clone(), defaultK, defaultStndDev);
clone.bandwidth = this.bandwidth;
if(this.vc != null)
clone.vc = this.vc.clone();
return clone;
}
@Override
public List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> getNearby(Vec x)
{
if(vc == null)
throw new UntrainedModelException("Model has not yet been created");
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> nearBy = getNearbyRaw(x);
//Normalize from their distances to their weights by kernel function
for(VecPaired<VecPaired<Vec, Integer>, Double> result : nearBy)
result.setPair(kf.k(result.getPair()));
return nearBy;
}
@Override
public List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> getNearbyRaw(Vec x)
{
if(vc == null)
throw new UntrainedModelException("Model has not yet been created");
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> nearBy = vc.search(x, bandwidth*kf.cutOff());
for(VecPaired<VecPaired<Vec, Integer>, Double> result : nearBy)
result.setPair(result.getPair()/bandwidth);
return nearBy;
}
@Override
public double pdf(Vec x)
{
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> nearBy = getNearby(x);
if(nearBy.isEmpty())
return 0;
double PDF = 0;
for(VecPaired<VecPaired<Vec, Integer>, Double> result : nearBy)
PDF+= result.getPair();
return PDF / (vc.size() * Math.pow(bandwidth, nearBy.get(0).length()));
}
@Override
public <V extends Vec> boolean setUsingData(List<V> dataSet, boolean parallel)
{
ExecutorService ex = ParallelUtils.getNewExecutor(parallel);
boolean toRet = setUsingData(dataSet, ex);
ex.shutdownNow();
return toRet;
}
/**
* Sets the KDE to model the density of the given data set with the specified bandwidth
* @param dataSet the data set to model the density of
* @param bandwith the bandwidth
* @return <tt>true</tt> if the model was fit, <tt>false</tt> if it could not be fit.
*/
public <V extends Vec> boolean setUsingData(List<V> dataSet, double bandwith)
{
return setUsingData(dataSet, bandwith, null);
}
/**
* Sets the KDE to model the density of the given data set with the specified bandwidth
* @param dataSet the data set to model the density of
* @param bandwith the bandwidth
* @param threadpool the source of threads for parallel construction
* @return <tt>true</tt> if the model was fit, <tt>false</tt> if it could not be fit.
*/
public <V extends Vec> boolean setUsingData(List<V> dataSet, double bandwith, ExecutorService threadpool)
{
setBandwith(bandwith);
List<VecPaired<Vec, Integer>> indexVectorPair = new ArrayList<>(dataSet.size());
for(int i = 0; i < dataSet.size(); i++)
indexVectorPair.add(new VecPaired<>(dataSet.get(i), i));
TrainableDistanceMetric.trainIfNeeded(distanceMetric, dataSet, threadpool);
vc.build(threadpool != null, indexVectorPair, distanceMetric);
return true;
}
/**
* Sets the KDE to model the density of the given data set by estimating the bandwidth by using
* the <tt>k</tt> nearest neighbors of each data point.
* @param dataSet the data set to model the density of
* @param k the number of neighbors to use to estimate the bandwidth
* @return <tt>true</tt> if the model was fit, <tt>false</tt> if it could not be fit.
*/
public <V extends Vec> boolean setUsingData(List<V> dataSet, int k)
{
return setUsingData(dataSet, k, defaultStndDev);
}
/**
* Sets the KDE to model the density of the given data set by estimating the bandwidth by using
* the <tt>k</tt> nearest neighbors of each data point.
* @param dataSet the data set to model the density of
* @param k the number of neighbors to use to estimate the bandwidth
* @param threadpool the source of threads for computation
* @return <tt>true</tt> if the model was fit, <tt>false</tt> if it could not be fit.
*/
public <V extends Vec> boolean setUsingData(List<V> dataSet, int k, ExecutorService threadpool)
{
return setUsingData(dataSet, k, defaultStndDev, threadpool);
}
/**
* Sets the KDE to model the density of the given data set by estimating the bandwidth
     * by using the <tt>k</tt> nearest neighbors of each data point. <br>
     * The bandwidth estimate is calculated as the mean of the distances of the k'th nearest 
* neighbor plus <tt>stndDevs</tt> standard deviations added to the mean.
*
* @param dataSet the data set to model the density of
* @param k the number of neighbors to use to estimate the bandwidth
* @param stndDevs the multiple of the standard deviation to add to the mean of the distances
* @return <tt>true</tt> if the model was fit, <tt>false</tt> if it could not be fit.
*/
public <V extends Vec> boolean setUsingData(List<V> dataSet, int k, double stndDevs)
{
return setUsingData(dataSet, k, stndDevs, null);
}
/**
* Sets the KDE to model the density of the given data set by estimating the bandwidth
     * by using the <tt>k</tt> nearest neighbors of each data point. <br>
     * The bandwidth estimate is calculated as the mean of the distances of the k'th nearest 
* neighbor plus <tt>stndDevs</tt> standard deviations added to the mean.
*
* @param dataSet the data set to model the density of
* @param k the number of neighbors to use to estimate the bandwidth
* @param stndDevs the multiple of the standard deviation to add to the mean of the distances
* @param threadpool the source of threads to use for computation
* @return <tt>true</tt> if the model was fit, <tt>false</tt> if it could not be fit.
*/
public <V extends Vec> boolean setUsingData(List<V> dataSet, int k, double stndDevs, ExecutorService threadpool)
{
List<VecPaired<Vec, Integer>> indexVectorPair = new ArrayList<>(dataSet.size());
for(int i = 0; i < dataSet.size(); i++)
indexVectorPair.add(new VecPaired<>(dataSet.get(i), i));
TrainableDistanceMetric.trainIfNeeded(distanceMetric, dataSet, threadpool);
vc.build(indexVectorPair, distanceMetric);
        //Take the average of the k'th neighbor distance to use as the bandwidth
OnLineStatistics stats;
if(threadpool == null)//k+1 b/c the first nearest neighbor will be itself
stats = VectorCollectionUtils.getKthNeighborStats(vc, dataSet, k + 1);
else
stats = VectorCollectionUtils.getKthNeighborStats(vc, dataSet, k + 1, threadpool);
setBandwith(stats.getMean() + stats.getStandardDeviation() * stndDevs);
return true;
}
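    /*
     * Worked example of the estimate above (all values hypothetical): if the
     * mean k'th nearest neighbor distance over the data set is 0.5 with a
     * standard deviation of 0.1 and stndDevs = 2, then the bandwidth used is
     * 0.5 + 2 * 0.1 = 0.7.
     */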
public <V extends Vec> boolean setUsingData(List<V> dataSet, ExecutorService threadpool)
{
return setUsingData(dataSet, defaultK, threadpool);
}
/**
* Sampling not yet supported
* @param count
* @param rand
* @return will not return
* @throws UnsupportedOperationException not yet implemented
*/
@Override
public List<Vec> sample(int count, Random rand)
{
//TODO implement sampling
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public KernelFunction getKernelFunction()
{
return kf;
}
public void setKernelFunction(KernelFunction kf)
{
this.kf = kf;
}
@Override
public void scaleBandwidth(double scale)
{
bandwidth *= scale;
}
}
| 15,974 | 37.868613 | 153 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/multivariate/MultivariateDistribution.java |
package jsat.distributions.multivariate;
import java.io.Serializable;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
/**
* This interface represents the contract that any continuous multivariate distribution must implement
*
* @author Edward Raff
*/
public interface MultivariateDistribution extends Cloneable, Serializable
{
/**
* Computes the log of the probability density function. If the
* probability of the input is zero, the log of zero would be
* {@link Double#NEGATIVE_INFINITY}. Instead, -{@link Double#MAX_VALUE} is returned.
*
     * @param x the array for the vector to get the log probability of
* @return the log of the probability.
* @throws ArithmeticException if the vector is not the correct length, or the distribution has not yet been set
*/
default public double logPdf(double... x)
{
return logPdf(DenseVector.toDenseVec(x));
}
/**
* Computes the log of the probability density function. If the
* probability of the input is zero, the log of zero would be
* {@link Double#NEGATIVE_INFINITY}. Instead, -{@link Double#MAX_VALUE} is returned.
*
     * @param x the vector to get the log probability of
* @return the log of the probability.
* @throws ArithmeticException if the vector is not the correct length, or the distribution has not yet been set
*/
public double logPdf(Vec x);
/**
     * Returns the probability density of a given vector from this distribution. The density 
     * is always non-negative, but unlike a probability it may exceed 1. 
     * 
     * @param x the array of the vector to get the probability density of
     * @return the probability density 
* @throws ArithmeticException if the vector is not the correct length, or the distribution has not yet been set
*/
default public double pdf(double... x)
{
return pdf(DenseVector.toDenseVec(x));
}
/**
     * Returns the probability density of a given vector from this distribution. The density 
     * is always non-negative, but unlike a probability it may exceed 1. 
     * 
     * @param x the vector to get the probability density of
     * @return the probability density 
* @throws ArithmeticException if the vector is not the correct length, or the distribution has not yet been set
*/
default public double pdf(Vec x)
{
return Math.exp(logPdf(x));
}
/**
* Sets the parameters of the distribution to attempt to fit the given list of vectors.
* All vectors are assumed to have the same weight.
* @param <V> the vector type
* @param dataSet the list of data points
* @return <tt>true</tt> if the distribution was fit to the data, or <tt>false</tt>
* if the distribution could not be fit to the data set.
*/
default public <V extends Vec> boolean setUsingData(List<V> dataSet)
{
return setUsingData(dataSet, false);
}
/**
* Sets the parameters of the distribution to attempt to fit the given list of vectors.
* All vectors are assumed to have the same weight.
* @param <V> the vector type
* @param dataSet the list of data points
* @param parallel {@code true} if the training should be done using
* multiple-cores, {@code false} for single threaded.
* @return <tt>true</tt> if the distribution was fit to the data, or <tt>false</tt>
* if the distribution could not be fit to the data set.
*/
public <V extends Vec> boolean setUsingData(List<V> dataSet, boolean parallel);
/**
* Sets the parameters of the distribution to attempt to fit the given list of data points.
* The {@link DataPoint#getWeight() weights} of the data points will be used.
*
* @param dataPoints the list of data points to use
* @return <tt>true</tt> if the distribution was fit to the data, or <tt>false</tt>
* if the distribution could not be fit to the data set.
*/
default public boolean setUsingDataList(List<DataPoint> dataPoints)
{
return setUsingData(dataPoints.stream().map(d->d.getNumericalValues()).collect(Collectors.toList()));
}
/**
* Sets the parameters of the distribution to attempt to fit the given list of data points.
* The {@link DataPoint#getWeight() weights} of the data points will be used.
*
* @param dataSet the data set to use
* @return <tt>true</tt> if the distribution was fit to the data, or <tt>false</tt>
* if the distribution could not be fit to the data set.
*/
default public boolean setUsingData(DataSet dataSet)
{
return setUsingData(dataSet, false);
}
/**
* Sets the parameters of the distribution to attempt to fit the given list
* of data points. The {@link DataPoint#getWeight() weights} of the data
* points will be used.
*
* @param dataSet the data set to use
     * @param parallel {@code true} if the fitting should be done using multiple cores, {@code false} for single threaded
* @return <tt>true</tt> if the distribution was fit to the data, or
* <tt>false</tt>
* if the distribution could not be fit to the data set.
*/
default public boolean setUsingData(DataSet dataSet, boolean parallel)
{
return setUsingData(dataSet.getDataVectors(), parallel);
}
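    /*
     * Usage sketch (illustrative only): any implementation can be fit and
     * queried through this interface. NormalM is just one example
     * implementation, and "vectors" is a hypothetical List<Vec>.
     *
     *   MultivariateDistribution dist = new NormalM();
     *   dist.setUsingData(vectors, true);       //parallel fit, returns false on failure
     *   double density = dist.pdf(vectors.get(0));
     */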
public MultivariateDistribution clone();
/**
* Performs sampling on the current distribution.
* @param count the number of iid samples to draw
* @param rand the source of randomness
* @return a list of sample vectors from this distribution
*/
public List<Vec> sample(int count, Random rand);
}
| 5,792 | 38.141892 | 116 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/multivariate/MultivariateDistributionSkeleton.java |
package jsat.distributions.multivariate;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
/**
* Common class for implementing a multivariate distribution. A number of methods are pre implemented,
* building off of the implementation of the remaining methods. <br>
* Note: the default implementation for the multithreaded methods calls the non threaded version of the method.
* The exception to this is the {@link #setUsingData(jsat.DataSet, java.util.concurrent.ExecutorService) } method,
* which calls {@link #setUsingData(java.util.List, java.util.concurrent.ExecutorService) }
*
* @author Edward Raff
*/
public abstract class MultivariateDistributionSkeleton implements MultivariateDistribution
{
private static final long serialVersionUID = 4080753806798149915L;
@Override
public double logPdf(Vec x)
{
double logPDF = Math.log(pdf(x));
        if(Double.isInfinite(logPDF) && logPDF < 0)//log(0) == -Infinity
return -Double.MAX_VALUE;
return logPDF;
}
@Override
abstract public MultivariateDistribution clone();
}
| 1,234 | 31.5 | 114 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/multivariate/MultivariateKDE.java |
package jsat.distributions.multivariate;
import java.util.List;
import jsat.distributions.empirical.KernelDensityEstimator;
import jsat.distributions.empirical.kernelfunc.KernelFunction;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
/**
* There are several methods of generalizing the {@link KernelDensityEstimator} to the multivariate case.
* This class provides a contract for implementations that provide a generalization of the KDE.
*
* @author Edward Raff
*/
abstract public class MultivariateKDE extends MultivariateDistributionSkeleton
{
private static final long serialVersionUID = 614136649331326270L;
/**
* Returns the list of vectors that have a non zero contribution to the
* density of the query point <tt>x</tt>. Each vector is paired with its integer index from the original constructing list vectors, and a double
* indicating its weight given the kernel function in use.
*
* @param x the query point
     * @return the list of nearby vectors and their weights
*/
abstract public List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> getNearby(Vec x);
/**
* Returns the list of vectors that have a non zero contribution to the density of the query point <tt>x</tt>.
* Each vector is paired with its integer index from the original constructing list vectors, and a double
* indicating its distance from the query point divided by the bandwidth of the point.
*
* @param x the query point
     * @return the list of nearby vectors and their weights
*/
abstract public List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> getNearbyRaw(Vec x);
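    /*
     * Sketch of how the nested pairing above is typically unpacked
     * (illustrative only; "kde" and "x" are hypothetical):
     *
     *   for(VecPaired<VecPaired<Vec, Integer>, Double> near : kde.getNearby(x))
     *   {
     *       int originalIndex = near.getVector().getPair(); //index into the training list
     *       double kernelWeight = near.getPair();           //weight given by the kernel function
     *   }
     */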
/**
*
* @return the kernel function used
*/
abstract public KernelFunction getKernelFunction();
/**
     * A caller may want to adjust the bandwidth after training has been 
     * completed, increasing it to get a smoother model or decreasing it to 
     * observe finer detail. This method will scale the bandwidth of each data point by the given factor
     * @param scale the value to scale the bandwidth by
*/
abstract public void scaleBandwidth(double scale);
@Override
abstract public MultivariateKDE clone();
}
| 2,253 | 37.862069 | 149 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/multivariate/NormalM.java |
package jsat.distributions.multivariate;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import jsat.linear.CholeskyDecomposition;
import jsat.linear.DenseVector;
import jsat.linear.Matrix;
import jsat.linear.MatrixStatistics;
import jsat.linear.SingularValueDecomposition;
import jsat.linear.Vec;
import static java.lang.Math.*;
import jsat.linear.IndexValue;
/**
* Class for the multivariate Normal distribution. It is often called the Multivariate Gaussian distribution.
*
* @author Edward Raff
*/
public class NormalM extends MultivariateDistributionSkeleton
{
private static final long serialVersionUID = -7043369396743253382L;
/**
* When computing the PDF of some x, part of the equation is only dependent on the covariance matrix. This part is
* <pre>
* -k
* -- -1
* 2 --
* / __\ 2
* \2 ||/ (|Sigma|)
* </pre>
* where k is the dimension, Sigma is the covariance matrix, and || denotes the determinant. <br>
     * Taking the log of this gives
* <pre>
* / __\
* (-k) log\2 ||/ - log(|Sigma|)
* -----------------------------
* 2
* </pre>
*
* This can then be added to the log of the x dependent part, which, when exponentiated, gives the correct result of dividing by this term.
*/
private double logPDFConst;
/**
* When we compute the constant {@link #logPDFConst}, we only need the inverse of the covariance matrix.
*/
private Matrix invCovariance;
private Vec invCov_diag;
private Vec mean;
/**
* Lower triangular cholesky decomposition used for sampling such that L * L<sup>T</sup> = Covariance Matrix
*/
private Matrix L;
private Vec L_diag;
/**
* The determinant of the covariance matrix.
*/
private double log_det;
public NormalM(Vec mean, Matrix covariance)
{
setMeanCovariance(mean, covariance);
}
public NormalM(Vec mean, Vec diag_covariance)
{
this.mean = mean.clone();
setCovariance(diag_covariance);
}
public NormalM()
{
}
/**
* Sets the mean and covariance for this distribution. For an <i>n</i> dimensional distribution,
* <tt>mean</tt> should be of length <i>n</i> and <tt>covariance</tt> should be an <i>n</i> by <i>n</i> matrix.
* It is also a requirement that the matrix be symmetric positive definite.
* @param mean the mean for the distribution. A copy will be used.
* @param covariance the covariance for this distribution. A copy will be used.
* @throws ArithmeticException if the <tt>mean</tt> and <tt>covariance</tt> do not agree, or the covariance is not
     * positive definite. An exception may not be thrown for all bad matrices. 
*/
public void setMeanCovariance(Vec mean, Matrix covariance)
{
if(!covariance.isSquare())
throw new ArithmeticException("Covariance matrix must be square");
else if(mean.length() != covariance.rows())
throw new ArithmeticException("The mean vector and matrix must have the same dimension," +
mean.length() + " does not match [" + covariance.rows() + ", " + covariance.rows() +"]" );
//Else, we are good!
this.mean = mean.clone();
setCovariance(covariance);
}
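    /*
     * Usage sketch (illustrative only): a 2 dimensional standard normal is
     * built, sampled, and queried. The values below are hypothetical.
     *
     *   Vec mean = DenseVector.toDenseVec(0.0, 0.0);
     *   NormalM normal = new NormalM(mean, Matrix.eye(2));   //identity covariance
     *   List<Vec> draws = normal.sample(100, new Random());
     *   double densityAtMean = normal.pdf(mean);             //= 1/(2 pi) for this case
     */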
/**
* Sets the covariance matrix for this matrix.
* @param covMatrix set the covariance matrix used for this distribution
* @throws ArithmeticException if the covariance matrix is not square,
* does not agree with the mean, or is not positive definite. An
* exception may not be throw for all bad matrices.
*/
public void setCovariance(Matrix covMatrix)
{
if(!covMatrix.isSquare())
throw new ArithmeticException("Covariance matrix must be square");
else if(covMatrix.rows() != this.mean.length())
throw new ArithmeticException("Covariance matrix does not agree with the mean");
CholeskyDecomposition cd = new CholeskyDecomposition(covMatrix.clone());
L = cd.getLT();
L.mutableTranspose();
log_det = cd.getLogDet();
int k = mean.length();
if(Double.isNaN(log_det) || log_det < log(1e-10))
{
            //Numerically unstable or rank deficient matrix. Use the SVD to work with the more stable pseudo inverse
SingularValueDecomposition svd = new SingularValueDecomposition(covMatrix.clone());
            //We need the rank deficient PDF and pseudo inverse
            this.logPDFConst = -0.5*log(svd.getPseudoDet()) - svd.getRank()*0.5*log(2*PI);
this.invCovariance = svd.getPseudoInverse();
}
else
{
this.logPDFConst = (-k*log(2*PI)-log_det)*0.5;
this.invCovariance = cd.solve(Matrix.eye(k));
}
this.invCov_diag = null;
this.L_diag = null;
}
public void setCovariance(Vec cov_diag)
{
if(cov_diag.length()!= this.mean.length())
throw new ArithmeticException("Covariance matrix does not agree with the mean");
int k = mean.length();
log_det = 0;
for(IndexValue iv : cov_diag)
log_det += Math.log(iv.getValue());
L_diag = cov_diag.clone();
        L_diag.applyFunction(Math::sqrt);//Cholesky is L*L' = C, since it's just the diagonal, that means sqrt
invCov_diag = cov_diag.clone();
this.logPDFConst = (-k*log(2*PI)-log_det)*0.5;
this.invCov_diag.applyFunction(f->f > 0 ? 1/f : 0.0);
this.invCovariance = null;
this.L = null;
}
public Vec getMean()
{
return mean;
}
@Override
public double logPdf(Vec x)
{
if(mean == null)
throw new ArithmeticException("No mean or variance set");
Vec xMinusMean = x.subtract(mean);
        //Compute the part that is dependent on x
double xDependent;
if(invCov_diag != null)
{
xDependent = 0;
for(IndexValue iv : xMinusMean)
xDependent += iv.getValue()*iv.getValue()*invCov_diag.get(iv.getIndex());
xDependent *= -0.5;
}
else
xDependent = xMinusMean.dot(invCovariance.multiply(xMinusMean))*-0.5;
return logPDFConst + xDependent;
}
@Override
public double pdf(Vec x)
{
double pdf = exp(logPdf(x));
        if(Double.isInfinite(pdf) || Double.isNaN(pdf))//Ugly numerical error has occurred
return 0;
return pdf;
}
@Override
public <V extends Vec> boolean setUsingData(List<V> dataSet, boolean parallel)
{
Vec origMean = this.mean;
try
{
Vec newMean = MatrixStatistics.meanVector(dataSet);
Matrix covariance = MatrixStatistics.covarianceMatrix(newMean, dataSet);
this.mean = newMean;
setCovariance(covariance);
return true;
}
catch(ArithmeticException ex)
{
this.mean = origMean;
return false;
}
}
    @Override
    public NormalM clone()
    {
        NormalM clone = new NormalM();
        if(this.invCovariance != null)
            clone.invCovariance = this.invCovariance.clone();
        if(this.invCov_diag != null)
            clone.invCov_diag = this.invCov_diag.clone();
        if(this.mean != null)
            clone.mean = this.mean.clone();
        if(this.L != null)
            clone.L = this.L.clone();
        if(this.L_diag != null)
            clone.L_diag = this.L_diag.clone();
        clone.logPDFConst = this.logPDFConst;
        clone.log_det = this.log_det;
        return clone;
    }
@Override
public List<Vec> sample(int count, Random rand)
{
List<Vec> samples = new ArrayList<>(count);
Vec Z = new DenseVector(L == null ? L_diag.length() : L.rows());
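        //Each sample is mean + L*Z with Z ~ N(0, I), which has covariance L*L^T = Sigma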
for(int i = 0; i < count; i++)
{
for(int j = 0; j < Z.length(); j++)
Z.set(j, rand.nextGaussian());
Vec sample;
            if(L != null)//full covariance matrix
sample = L.multiply(Z);
else
sample = L_diag.pairwiseMultiply(Z);
sample.mutableAdd(mean);
samples.add(sample);
}
return samples;
}
}
| 8,237 | 32.901235 | 144 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/multivariate/NormalMR.java | /*
* Copyright (C) 2018 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.distributions.multivariate;
import java.util.List;
import jsat.linear.DenseMatrix;
import jsat.linear.DenseVector;
import jsat.linear.Matrix;
import jsat.linear.MatrixStatistics;
import jsat.linear.Vec;
/**
* This class implements the Multivariate Normal Distribution, but augments it
* so that {@link #setUsingData(jsat.DataSet, boolean) fitting} the distribution
* uses a robust estimate of the distribution parameters. This comes at
* increased cost that is cubic with respect to the number of variables.
*
* @author Edward Raff
*/
public class NormalMR extends NormalM
{
@Override
public <V extends Vec> boolean setUsingData(List<V> dataSet, boolean parallel)
{
try
{
Vec mean = new DenseVector(dataSet.get(0).length());
Matrix cov = new DenseMatrix(mean.length(), mean.length());
MatrixStatistics.FastMCD(mean, cov, dataSet, parallel);
setMeanCovariance(mean, cov);
return true;
}
catch(Exception ex)
{
return false;
}
}
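    /*
     * Usage sketch (illustrative only): the robust fit is a drop-in
     * replacement for the parent class. "vectors" is a hypothetical
     * List<Vec> that may contain outliers.
     *
     *   NormalMR robust = new NormalMR();
     *   if(robust.setUsingData(vectors, true))   //FastMCD based fit, run in parallel
     *       System.out.println("density: " + robust.pdf(vectors.get(0)));
     */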
}
| 1,783 | 30.857143 | 82 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/multivariate/ProductKDE.java |
package jsat.distributions.multivariate;
import static java.lang.Math.*;
import java.util.*;
import jsat.distributions.empirical.KernelDensityEstimator;
import jsat.distributions.empirical.kernelfunc.EpanechnikovKF;
import jsat.distributions.empirical.kernelfunc.KernelFunction;
import jsat.exceptions.UntrainedModelException;
import jsat.linear.*;
import jsat.utils.IndexTable;
import jsat.utils.IntSet;
/**
* The Product Kernel Density Estimator is a generalization of the {@link KernelDensityEstimator} to the multivariate case.
* This is done by using a kernel and bandwidth for each dimension, such that the bandwidth for each dimension can be
* determined using the same methods as the univariate KDE. This can simplify the difficulty in bandwidth selection
* for the multivariate case.
*
* @author Edward Raff
* @see MetricKDE
*/
public class ProductKDE extends MultivariateKDE
{
private static final long serialVersionUID = 7298078759216991650L;
private KernelFunction k;
private double[][] sortedDimVals;
private double[] bandwidth;
private int[][] sortedIndexVals;
/**
* The original list of vectors used to create the KDE, used to avoid an expensive reconstruction of the vectors
*/
private List<Vec> originalVecs;
/**
* Creates a new KDE that uses the {@link EpanechnikovKF} kernel.
*/
public ProductKDE()
{
this(EpanechnikovKF.getInstance());
}
/**
* Creates a new KDE that uses the specified kernel
* @param k the kernel method to use
*/
public ProductKDE(KernelFunction k)
{
this.k = k;
}
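    /*
     * Usage sketch (illustrative only): one univariate style bandwidth is
     * estimated per dimension when the data is set. "data" is a hypothetical
     * List<Vec> of training vectors.
     *
     *   ProductKDE kde = new ProductKDE(EpanechnikovKF.getInstance());
     *   kde.setUsingData(data);     //estimates a bandwidth for each dimension
     *   double density = kde.pdf(data.get(0));
     */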
@Override
public ProductKDE clone()
{
ProductKDE clone = new ProductKDE();
if(this.k != null)
clone.k = k;
if(this.sortedDimVals != null)
{
clone.sortedDimVals = new double[sortedDimVals.length][];
for(int i = 0; i < this.sortedDimVals.length; i++)
clone.sortedDimVals[i] = Arrays.copyOf(this.sortedDimVals[i], this.sortedDimVals[i].length);
}
if(this.sortedIndexVals != null)
{
clone.sortedIndexVals = new int[sortedIndexVals.length][];
for(int i = 0; i < this.sortedIndexVals.length; i++)
clone.sortedIndexVals[i] = Arrays.copyOf(this.sortedIndexVals[i], this.sortedIndexVals[i].length);
}
if(this.bandwidth != null)
clone.bandwidth = Arrays.copyOf(this.bandwidth, this.bandwidth.length);
if(this.originalVecs != null)
clone.originalVecs = new ArrayList<Vec>(this.originalVecs);
return clone;
}
@Override
public List<VecPaired<VecPaired<Vec, Integer>, Double>> getNearby(Vec x)
{
SparseVector logProd = new SparseVector(sortedDimVals[0].length);
Set<Integer> validIndecies = new IntSet();
double logH = queryWork(x, validIndecies, logProd);
List<VecPaired<VecPaired<Vec, Integer>, Double>> results = new ArrayList<>(validIndecies.size());
for(int i : validIndecies)
{
Vec v = originalVecs.get(i);
results.add(new VecPaired<>(new VecPaired<>(v, i), exp(logProd.get(i))));
}
return results;
}
@Override
public List<VecPaired<VecPaired<Vec, Integer>, Double>> getNearbyRaw(Vec x)
{
        //Not entirely sure how I'm going to fix this... but this isn't technically right
throw new UnsupportedOperationException("Product KDE can not recover raw Score values");
}
@Override
public double pdf(Vec x)
{
double PDF = 0;
int N = sortedDimVals[0].length;
SparseVector logProd = new SparseVector(sortedDimVals[0].length);
Set<Integer> validIndecies = new IntSet();
double logH = queryWork(x, validIndecies, logProd);
for(int i : validIndecies)
PDF += exp(logProd.get(i)-logH);
return PDF/N;
}
/**
* Performs the main work for performing a density query.
*
* @param x the query vector
* @param validIndecies the empty set that will be altered to contain the
* indices of vectors that had a non zero contribution to the density
     * @param logProd an empty sparse vector that will be modified to contain the log of the product of the 
* kernels for each data point. Some indices that have zero contribution to the density will have non
* zero values. <tt>validIndecies</tt> should be used to access the correct indices.
* @return The log product of the bandwidths that normalizes the values stored in the <tt>logProd</tt> vector.
*/
private double queryWork(Vec x, Set<Integer> validIndecies, SparseVector logProd)
{
if(originalVecs == null)
throw new UntrainedModelException("Model has not yet been created, queries can not be perfomed");
double logH = 0;
for(int i = 0; i < sortedDimVals.length; i++)
{
double[] X = sortedDimVals[i];
double h = bandwidth[i];
logH += log(h);
double xi = x.get(i);
//Only values within a certain range will have an effect on the result, so we will skip to that range!
int from = Arrays.binarySearch(X, xi-h*k.cutOff());
int to = Arrays.binarySearch(X, xi+h*k.cutOff());
            //Most likely the exact value of x is not in the list, so binarySearch returns the insertion points
from = from < 0 ? -from-1 : from;
to = to < 0 ? -to-1 : to;
Set<Integer> subIndecies = new IntSet();
for(int j = max(0, from); j < min(X.length, to+1); j++)
{
int trueIndex = sortedIndexVals[i][j];
if(i == 0)
{
validIndecies.add(trueIndex);
logProd.set(trueIndex, log(k.k( (xi-X[j])/h )));
}
else if(validIndecies.contains(trueIndex))
{
logProd.increment(trueIndex, log(k.k( (xi-X[j])/h )));
subIndecies.add(trueIndex);
}
}
if (i > 0)
{
validIndecies.retainAll(subIndecies);
if(validIndecies.isEmpty())
break;
}
}
return logH;
}
@Override
public <V extends Vec> boolean setUsingData(List<V> dataSet, boolean parallel)
{
int dimSize = dataSet.get(0).length();
sortedDimVals = new double[dimSize][dataSet.size()];
sortedIndexVals = new int[dimSize][dataSet.size()];
bandwidth = new double[dimSize];
for(int i = 0; i < dataSet.size(); i++)
{
Vec v = dataSet.get(i);
for(int j = 0; j < v.length(); j++)
sortedDimVals[j][i] = v.get(j);
}
for(int i = 0; i < dimSize; i++)
{
IndexTable idt = new IndexTable(sortedDimVals[i]);
for( int j = 0; j < idt.length(); j++)
sortedIndexVals[i][j] = idt.index(j);
idt.apply(sortedDimVals[i]);
bandwidth[i] = KernelDensityEstimator.BandwithGuassEstimate(DenseVector.toDenseVec(sortedDimVals[i]))*dimSize;
}
this.originalVecs = (List<Vec>) dataSet;
return true;
}
@Override
public List<Vec> sample(int count, Random rand)
{
throw new UnsupportedOperationException("Not supported yet.");
}
@Override
public KernelFunction getKernelFunction()
{
return k;
}
@Override
public void scaleBandwidth(double scale)
{
for(int i = 0; i < bandwidth.length; i++)
            bandwidth[i] *= scale;
}
}
| 7,868 | 34.129464 | 124 | java |
JSAT | JSAT-master/JSAT/src/jsat/distributions/multivariate/SymmetricDirichlet.java |
package jsat.distributions.multivariate;
import jsat.math.Function;
import jsat.math.optimization.NelderMead;
import java.util.ArrayList;
import jsat.distributions.Gamma;
import jsat.linear.DenseVector;
import java.util.Random;
import java.util.List;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
import static java.lang.Math.*;
import static jsat.math.SpecialMath.*;
import jsat.utils.concurrent.ParallelUtils;
/**
* The Symmetric Dirichlet Distribution is a special case of the {@link Dirichlet} distribution, and occurs when all alphas have the same value.
*
* @author Edward Raff
*/
public class SymmetricDirichlet extends MultivariateDistributionSkeleton
{
private static final long serialVersionUID = -1206894014440494142L;
private double alpha;
private int dim;
/**
* Creates a new Symmetric Dirichlet distribution.
*
* @param alpha the positive alpha value for the distribution
* @param dim the dimension of the distribution.
* @throws ArithmeticException if a non positive alpha or dimension value is given
*/
public SymmetricDirichlet(double alpha, int dim)
{
setAlpha(alpha);
setDimension(dim);
}
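    /*
     * Usage sketch (illustrative only): a 3 dimensional symmetric Dirichlet
     * with alpha = 2. Sampled vectors always lie on the probability simplex,
     * so each one sums to 1.
     *
     *   SymmetricDirichlet dist = new SymmetricDirichlet(2.0, 3);
     *   List<Vec> draws = dist.sample(10, new Random());
     *   double density = dist.pdf(DenseVector.toDenseVec(0.2, 0.3, 0.5));
     */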
/**
* Sets the dimension size of the distribution
* @param dim the new dimension size
*/
public void setDimension(int dim)
{
if(dim <= 0)
throw new ArithmeticException("A positive number of dimensions must be given");
this.dim = dim;
}
/**
* Returns the dimension size of the current distribution
* @return the number of dimensions in this distribution
*/
public int getDimension()
{
return dim;
}
/**
* Sets the alpha value used for the distribution
* @param alpha the positive value for the distribution
* @throws ArithmeticException if the value given is not a positive value
*/
public void setAlpha(double alpha) throws ArithmeticException
{
if(alpha <= 0 || Double.isNaN(alpha) || Double.isInfinite(alpha))
throw new ArithmeticException("Symmetric Dirichlet Distribution parameters must be positive, " + alpha + " is invalid");
this.alpha = alpha;
}
/**
* Returns the alpha value used by this distribution
* @return the alpha value used by this distribution
*/
public double getAlpha()
{
return alpha;
}
@Override
public SymmetricDirichlet clone()
{
return new SymmetricDirichlet(alpha, dim);
}
@Override
public double logPdf(Vec x)
{
if(x.length() != dim)
            throw new ArithmeticException( dim + " variable distribution can not answer a " + x.length() + " dimension variable");
double logVal = 0;
int K = x.length();
for(int i = 0; i < K; i++)
logVal += log(x.get(i))*(alpha-1);
logVal = logVal + lnGamma(alpha*K) - lnGamma(alpha)*K;
if(Double.isInfinite(logVal) || Double.isNaN(logVal) || abs(x.sum() - 1.0) > 1e-14)
return -Double.MAX_VALUE;
return logVal;
}
@Override
public double pdf(Vec x)
{
return exp(logPdf(x));
}
@Override
public <V extends Vec> boolean setUsingData(final List<V> dataSet, boolean parallel)
{
Function logLike = (Vec x, boolean p) ->
{
double a = x.get(0);
double constantTerm = lnGamma(a*dim);
constantTerm -= lnGamma(a)*dim;
double sum = ParallelUtils.run(p, dataSet.size(), (start, end)->
{
double local_sum = 0;
for(int i = start; i < end; i++)
{
Vec s = dataSet.get(i);
for(int j = 0; j < s.length(); j++)
local_sum += log(s.get(j))*(a-1.0);
}
return local_sum;
}, (z,b)->z+b);
return -(sum+constantTerm*dataSet.size());
};
NelderMead optimize = new NelderMead();
Vec guess = new DenseVector(1);
List<Vec> guesses = new ArrayList<>();
guesses.add(guess.add(1.0));
guesses.add(guess.add(0.1));
guesses.add(guess.add(10.0));
this.alpha = optimize.optimize(1e-10, 100, logLike, guesses, parallel).get(0);
return true;
}
@Override
public List<Vec> sample(int count, Random rand)
{
List<Vec> samples = new ArrayList<>(count);
double[] gammaSamples = new Gamma(alpha, 1.0).sample(count*dim, rand);
int samplePos = 0;
for(int i = 0; i < count; i++)
{
Vec sample = new DenseVector(dim);
for(int j = 0; j < dim; j++)
sample.set(j, gammaSamples[samplePos++]);
sample.mutableDivide(sample.sum());
samples.add(sample);
}
return samples;
}
}
| 4,987 | 29.414634 | 145 | java |
JSAT | JSAT-master/JSAT/src/jsat/driftdetectors/ADWIN.java | package jsat.driftdetectors;
import java.util.*;
import jsat.math.OnLineStatistics;
/**
* <i>Ad</i>aptive <i>Win</i>dowing (ADWIN) is an algorithm for detecting
* changes in an input stream. ADWIN maintains an approximated window of the
* input history, and works in O(log(n)) time and O(log(n)) memory, where
* <i>n</i> is the current window size. Whenever a drift is detected and
* handled, the size of the window will be reduced. <br>
* <br>
* The window in ADWIN is only for the {@code double} values passed when calling
* {@link #addSample(double, java.lang.Object) }. The object paired with the
* numeric value will <i>not</i> be compressed and is added to on every update.
* It is important to control its size using {@link #setMaxHistory(int) } when
* using ADWIN. By default, ADWIN will use a maximum history of 0. <br>
* <br>
 * See: Bifet, A., & Gavalda, R. (2007). <i>Learning from Time-Changing Data 
* with Adaptive Windowing</i>. In SIAM International Conference on Data Mining.
*
* @author Edward Raff
*/
public class ADWIN<V> extends BaseDriftDetector<V>
{
private static final long serialVersionUID = 3287510845017257629L;
private double delta;
private OnLineStatistics allStats;
/**
     * List of all the stats summarizing the windows. New items are added to the 
     * head of the list, so we can iterate from the tail up toward the head. This 
     * makes writing the logic somewhat easier. <br>
     * This means the head contains the newest / smallest windows, and the tail 
     * contains the oldest / largest windows. This is opposite of the ADWIN paper
*/
private LinkedList<OnLineStatistics> windows;
/*
* default: "We use, somewhat arbitrarily, M = 5 for all experiments" under
* section: 4 Experimental Validation of ADWIN2
*/
private int M = 5;
//Data used when a change is deteceted
private double leftMean = Double.NaN, leftVariance = Double.NaN;
private double rightMean = Double.NaN, rightVariance = Double.NaN;
/**
* Creates a new ADWIN object for detecting changes in the mean value of a
* stream of inputs. It will use a not keep any object history by default.
* @param delta the desired false positive rate
*/
public ADWIN(double delta)
{
this(delta, 0);
}
/**
* Creates a new ADWIN object for detecting changes in the mean value of a
* stream of inputs.
* @param delta the desired false positive rate
* @param maxHistory the maximum history of objects to keep
*/
public ADWIN(double delta, int maxHistory)
{
super();
setDelta(delta);
setMaxHistory(maxHistory);
allStats = new OnLineStatistics();
windows = new LinkedList<OnLineStatistics>();
}
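    /*
     * Usage sketch (illustrative only): feed a stream of numeric values and
     * react when the window mean shifts. The observedValue/labelFor calls and
     * streamLength are hypothetical stand-ins for the user's own code.
     *
     *   ADWIN<String> adwin = new ADWIN<>(0.01, 100);
     *   for(int t = 0; t < streamLength; t++)
     *   {
     *       if(adwin.addSample(observedValue(t), labelFor(t)))
     *       {
     *           List<String> changed = adwin.getDriftedHistory(); //items after the change point
     *           adwin.driftHandled();                             //shrink the window and continue
     *       }
     *   }
     */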
/**
* Copy constructor
* @param toCopy the object to copy
*/
public ADWIN(ADWIN<V> toCopy)
{
super(toCopy);
this.delta = toCopy.delta;
this.allStats = toCopy.allStats.clone();
this.M = toCopy.M;
this.leftMean = toCopy.leftMean;
this.rightMean = toCopy.rightMean;
this.leftVariance = toCopy.leftVariance;
this.rightVariance = toCopy.rightVariance;
this.windows = new LinkedList<OnLineStatistics>();
for(OnLineStatistics stats : toCopy.windows)
this.windows.add(stats.clone());
}
/**
* Sets the upper bound on the false positive rate for detecting concept
* drifts
* @param delta the upper bound on false positives in (0,1)
*/
public void setDelta(double delta)
{
if(delta <= 0 || delta >= 1 || Double.isNaN(delta))
throw new IllegalArgumentException("delta must be in (0,1), not " + delta);
this.delta = delta;
}
/**
* Returns the upper bound on false positives
* @return the upper bound on false positives
*/
public double getDelta()
{
return delta;
}
/**
* This parameter controls the trade off of space and accuracy for the
* sliding window. The larger {@code M} becomes, the more accurate the
* window will be - but at the cost of execution speed.
* @param M the window space constant in [1, ∞)
*/
public void setM(int M)
{
if(M < 1)
throw new IllegalArgumentException("M must be positive, not " + M);
this.M = M;
}
/**
* Returns the accuracy / speed parameter for ADWIN
* @return the accuracy / speed parameter
*/
public int getM()
{
return M;
}
@Override
public boolean addSample(double value, V obj)
{
if(drifting)
throw new UnhandledDriftException("Drift must be handled before continuing");
time++;
addToHistory(obj);
//add to the window
allStats.add(value);
OnLineStatistics w = new OnLineStatistics();
w.add(value);
windows.addFirst(w);
//check if a change has occured
Iterator<OnLineStatistics> testIter = windows.descendingIterator();
OnLineStatistics leftStats = new OnLineStatistics();
OnLineStatistics rightStats = allStats.clone();
final double deltaPrime = delta/Math.log(allStats.getSumOfWeights());//will be > 1 in log, no issues
final double ln2delta = Math.log(2) - Math.log(deltaPrime);
final double variance_W = allStats.getVarance();
while(testIter.hasNext())
{
OnLineStatistics windowItem = testIter.next();
//accumulate left side statistics
leftStats.add(windowItem);
//decrament right side stats
rightStats.remove(windowItem);
double n_0 = leftStats.getSumOfWeights();
double n_1 = rightStats.getSumOfWeights();
double mu_0 = leftStats.getMean();
double mu_1 = rightStats.getMean();
// 1/(1/x+1/y) = x y / (x + y), and then inverse so (x+y)/(xy)
double mInv = (n_0 + n_1) / (n_0 * n_1);
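            //Cut threshold from the ADWIN paper: e_cut = sqrt(2/m * var_W * ln(2/delta')) + 2/(3m) * ln(2/delta'),
            //where 1/m = mInv is the inverse of the harmonic mean of the two sub window sizes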
double e_cut = Math.sqrt(2 * mInv * variance_W * ln2delta) + 2.0 / 3.0 * mInv * ln2delta;
if(Math.abs(mu_0 - mu_1) > e_cut)//CHANGE! OMG
{
drifting = true;
driftStart = (int) (n_0);
//set stats for them to find
leftMean = mu_0;
leftVariance = leftStats.getVarance();
rightMean = mu_1;
rightVariance = rightStats.getVarance();
/*
                 * we keep going in case there is a more recent start point for 
                 * the drift, as the change in mean at the front could have been 
                 * large enough / dramatic enough to make preceding cuts also 
* look like drift
*/
}
}
compress();
return drifting;
}
/**
* Compresses the current window
*/
private void compress()
{
//compress
ListIterator<OnLineStatistics> listIter = windows.listIterator();
double lastSizeSeen = -Double.MAX_VALUE;
int lastSizeCount = 0;
while(listIter.hasNext())
{
OnLineStatistics window = listIter.next();
double n = window.getSumOfWeights();
if(n == lastSizeSeen)
{
if(++lastSizeCount > M)//compress, can only occur if there is a previous
{
listIter.previous();
window.add(listIter.previous());
                    listIter.remove();//remove the previous
if(listIter.hasNext())
listIter.next();//back to where we were, which has been modified
                    //so now we must be looking at a new range since we just promoted a window
lastSizeSeen = window.getSumOfWeights();
lastSizeCount = 1;
}
}
else
{
lastSizeSeen = n;
lastSizeCount = 1;
}
}
}
/**
* Returns the mean value for all inputs contained in the current window
* @return the mean value of the window
*/
public double getMean()
{
return allStats.getMean();
}
/**
* Returns the variance for all inputs contained in the current window
* @return the variance for the window
*/
public double getVariance()
{
return allStats.getVarance();
}
/**
* Returns the standard deviation for all inputs contained in the current
* window.
* @return the standard deviation for the window
*/
public double getStndDev()
{
return allStats.getStandardDeviation();
}
/**
* This returns the current "length" of the window, which is the number of
* items that have been added to the ADWIN object since the last drift, and
     * is reduced when drift occurs. 
* @return the number of items stored implicitly in the window
*/
public int getWidnowLength()
{
return time;
}
/**
* Returns the mean value determined for the older values that we have
* drifted away from. <br>
* If drifting has not occurred or has already been handled,
* {@link Double#NaN} will be returned.
* @return the mean for the old values.
*/
public double getOldMean()
{
return leftMean;
}
/**
* Returns the variance for the older values that we have
* drifted away from. <br>
* If drifting has not occurred or has already been handled,
* {@link Double#NaN} will be returned.
* @return the variance for the old values
*/
public double getOldVariance()
{
return leftVariance;
}
/**
* Returns the standard deviation for the older values that we have
* drifted away from. <br>
* If drifting has not occurred or has already been handled,
* {@link Double#NaN} will be returned.
* @return the standard deviation for the old values
*/
public double getOldStndDev()
{
return Math.sqrt(leftVariance);
}
/**
* Returns the mean value determined for the newer values that we have
* drifted into. <br>
* If drifting has not occurred or has already been handled,
* {@link Double#NaN} will be returned.
* @return the mean for the newer values.
*/
public double getNewMean()
{
return rightMean;
}
/**
* Returns the variance for the newer values that we have
* drifted into. <br>
* If drifting has not occurred or has already been handled,
* {@link Double#NaN} will be returned.
* @return the variance for the newer values
*/
public double getNewVariance()
{
return rightVariance;
}
/**
* Returns the standard deviation for the newer values that we have
* drifted into. <br>
* If drifting has not occurred or has already been handled,
* {@link Double#NaN} will be returned.
* @return the standard deviation for the newer values
*/
public double getNewStndDev()
{
return Math.sqrt(rightVariance);
}
/**
* This implementation of ADWIN allows for choosing to drop either the old
* values, as is normal for a drift detector, <i>or</i> to drop the newer
* values. Passing {@code true} will result in the standard behavior of
* calling {@link #driftHandled() }. <br>
     * If {@code false} is passed in to drop the <i>newer</i> values that 
     * drifted, it is probable that continuing to add new examples will continue 
     * to cause detections. 
*
* @param dropOld {@code true} to drop the older values out of the window
* that we drifted away from, or {@code false} to drop the newer values and
* retain the old ones.
*/
public void driftHandled(boolean dropOld)
{
/*
* Iterate through and either drop everything to the left OR the right
         * Track statistics so that we can update allStats
*/
Iterator<OnLineStatistics> testIter = windows.descendingIterator();
OnLineStatistics leftStats = new OnLineStatistics();
while (testIter.hasNext())
{
OnLineStatistics windowItem = testIter.next();
//accumulate left side statistics
if(leftStats.getSumOfWeights() < driftStart)
{
leftStats.add(windowItem);
if(dropOld)
testIter.remove();
}
else
{
if(!dropOld)
testIter.remove();
}
}
if(dropOld)
allStats.remove(leftStats);
else
allStats = leftStats;
time = (int) allStats.getSumOfWeights();
leftMean = leftVariance = rightMean = rightVariance = Double.NaN;
//Calling at the end b/c we need driftStart's value
super.driftHandled();
}
@Override
public void driftHandled()
{
this.driftHandled(true);
}
@Override
public ADWIN<V> clone()
{
return new ADWIN<V>(this);
}
}
| 13,446 | 31.877751 | 109 | java |
JSAT | JSAT-master/JSAT/src/jsat/driftdetectors/BaseDriftDetector.java | package jsat.driftdetectors;
import java.io.Serializable;
import java.util.*;
/**
* Base class for providing common functionality to drift detection algorithms
* @author Edward Raff
*/
public abstract class BaseDriftDetector<V> implements Cloneable, Serializable
{
private static final long serialVersionUID = -5857845807016446270L;
/**
     * Tracks the number of updates / trials seen. May be reset as needed, so 
* long as it increases compared to {@link #driftStart}
*/
protected int time = 0;
/**
* Controls the maximum amount of history to keep
*/
protected int maxHistory = Integer.MAX_VALUE;
/**
     * Set to {@code true} to indicate that a warning mode is in effect. 
*/
protected boolean warning = false;
/**
* Set to {@code true} to indicate that concept drift has occurred
*/
protected boolean drifting = false;
/**
* Set this value to the time point where the drift is believed to have
* started from. Set to -1 to indicate no drift
*/
protected int driftStart = -1;
/**
* Holds the associated object history. The history is always FIFO, with the
* end (tail) of the queue containing the most recent object, and the front
* (head) containing the oldest object.
*/
protected Deque<V> history;
protected BaseDriftDetector()
{
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
protected BaseDriftDetector(BaseDriftDetector<V> toCopy)
{
this.time = toCopy.time;
this.maxHistory = toCopy.maxHistory;
this.warning = toCopy.warning;
this.driftStart = toCopy.driftStart;
if(toCopy.history != null)
{
this.history = new ArrayDeque<V>(toCopy.history.size());
for(V v : toCopy.history)
this.history.add(v);
}
}
/**
* Returns {@code true} if the algorithm is in a warning state. This state
* indicates that the algorithm believes concept drift may be occurring, but
* is not confident enough to say that is had definitely occurred. <br>
* Not all algorithms will raise a warning state, and some may only begin
* keeping track of history once in the warning state.
* @return {@code true} if concept drift may have started, but is not sure
*/
public boolean isWarning()
{
return warning;
}
/**
* Returns {@code true} if the algorithm believes that drift has definitely
     * occurred. At this point the drift should be dealt with and then 
     * {@link #driftHandled() } called so that tracking can continue. 
     * @return {@code true} if the algorithm believes drift has definitely occurred 
*/
public boolean isDrifting()
{
return drifting;
}
/**
* Returns the maximum number of items that will be kept in the history.
* @return the maximum number of items that will be kept in the history.
*/
public int getMaxHistory()
{
return maxHistory;
}
/**
* Sets the maximum number of items to store in history. Setting this to
* {@code 0} will keep the detector from ever storing history. <br>
* The user can still keep their own independent history or checkpoints by
* using the {@link #isDrifting() } and {@link #isWarning() } methods. <br>
* <br>
* The history size may be changed at any time, but may result in the loss
* of history.
*
* @param maxHistory the new maximum history size of objects added
*/
public void setMaxHistory(int maxHistory)
{
this.maxHistory = maxHistory;
if(history != null)
if (this.maxHistory == 0)
history.clear();
else
while (history.size() > maxHistory)
history.removeFirst();
}
/**
* Adds the given item to the history, creating a new history holder if
* needed. This method handles the cases where the max history is zero,
* and when the history is full (dropping the oldest)
* @param obj the object to add to the history
*/
protected void addToHistory(V obj)
{
if(maxHistory < 1)
return;
        if(history == null)
        {
            if (maxHistory != Integer.MAX_VALUE)//we probably set it to a reasonable value
            {
                try
                {
                    history = new ArrayDeque<V>(maxHistory);
                }
                catch (Exception ex)
                {
                    //what if we cause one of the many OOM exceptions b/c the initial history was too big?
                    //AKA we goofed on being helpful
                    history = new ArrayDeque<V>();
                }
            }
            else
                history = new ArrayDeque<V>();
        }
if(history.size() == maxHistory)//make room
history.removeFirst();
history.add(obj);
}
/**
* Clears the current history
*/
public void clearHistory()
{
if(history != null)
history.clear();
}
/**
* Returns the number of items in recent history that differed from the
* historical values, or {@code -1} if there has not been any detected
* drift. This method will return {@code -1} even if {@link #isWarning() }
* is {@code true}.
* @return the number of updates ago that the drift started, or {@code -1}
* if no drift has occurred
*/
public int getDriftAge()
{
if(driftStart == -1)
return -1;
return time-driftStart;
}
/**
* Returns a new list containing up to {@link #getMaxHistory() } objects in
* the history that drifted away from the prior state of the model. <br>
* The 0 index in the list will be the most recently added item, and the
* largest index will be the oldest item.
* @return the list of objects that make up the effected history
*/
public List<V> getDriftedHistory()
{
int historyToGram = Math.min(time - driftStart, history.size());
ArrayList<V> histList = new ArrayList<V>(historyToGram);
Iterator<V> histIter = history.descendingIterator();
while(histIter.hasNext() && historyToGram > 0)
{
historyToGram--;
histList.add(histIter.next());
}
return histList;
}
/**
* Adds a new point to the drift detector. If an escalation in state occurs,
* {@code true} will be returned. A change of state could be either drift
* occurring {@link #isDrifting} or a warning state {@link #isWarning}.
* If the detector was in a warning state and then goes back to normal,
* {@code false} will be returned. <br>
* <br>
* For binary (true / false) drift detectors, {@code value} will be
* considered {@code false} if and only if its value is equal to zero. Any
* non zero value will be treated as {@code true} <br>
* <br>
* Objects added with the value may or may not be added to the history, the
* behavior is algorithm dependent. Some may always add it to the history,
* while others will only begin collecting history once a warning state
* occurs.
*
* @param value the numeric value to add to the drift detector
* @param obj the object associated with this value. It may or may not be
* stored in the detectors history
* @return {@code true} if a drift has or may be starting.
* @throws UnhandledDriftException if {@link #driftHandled() } is not called
* after drifting is detected
*/
public abstract boolean addSample(double value, V obj);
/**
* This method should be called once the drift is handled. Once done, this
* method will clear the flags and prepare the detector to continue tracking
* drift again. <br>
* By using this method, one can continue to track multiple future drift
* changes without having to feed the history data (which may be incomplete)
* into a new detector object.
*/
public void driftHandled()
{
warning = drifting = false;
driftStart = -1;
}
@Override
abstract public Object clone();
}
| 8,189 | 32.565574 | 102 | java |
JSAT | JSAT-master/JSAT/src/jsat/driftdetectors/DDM.java | package jsat.driftdetectors;
/**
* DDM (Drift Detection Method) is a drift detector for binary events, and is
* meant to detect decreases in the success rate over time. As such it will not
* inform of any positive drift. <br>
* <br>
* This drift detector supports a warning state, and will not begin to store
* the object history until a warning state begins. If the warning state ends
* before a detection of drift occurs, the history will be dropped.
* <br>
 * See: Gama, J., Medas, P., Castillo, G., & Rodrigues, P. (2004). <i>Learning 
 * with Drift Detection</i>. In A. C. Bazzan & S. Labidi (Eds.), Advances in 
* Artificial Intelligence – SBIA 2004 (pp. 286–295). Springer Berlin
* Heidelberg. doi:10.1007/978-3-540-28645-5_29
*
* @author Edward Raff
*/
public class DDM<V> extends BaseDriftDetector<V>
{
private static final long serialVersionUID = 3023405445609636195L;
/**
     * Number of times we failed the trial
*/
private int fails;
private int minSamples = 30;
private double p_min;
private double s_min;
private double warningThreshold;
private double driftThreshold;
/**
* Creates a new DDM drift detector using the default warning and drift
* thresholds of 2 and 3 respectively.
*/
public DDM()
{
this(2, 3);
}
/**
* Creates a new DDM drift detector
* @param warningThreshold the threshold for starting a warning state
* @param driftThreshold the threshold for recognizing a drift
*/
public DDM(double warningThreshold, double driftThreshold)
{
super();
setWarningThreshold(warningThreshold);
setDriftThreshold(driftThreshold);
driftHandled();
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
    public DDM(DDM<V> toCopy)
    {
        super(toCopy);
        this.fails = toCopy.fails;
        this.minSamples = toCopy.minSamples;
        this.p_min = toCopy.p_min;
        this.s_min = toCopy.s_min;
        this.warningThreshold = toCopy.warningThreshold;
        this.driftThreshold = toCopy.driftThreshold;
    }
/**
* Returns the current estimate of the success rate (number of {@code true}
* inputs) for the model.
* @return the current estimate of the success rate
*/
public double getSuccessRate()
{
return 1.0-fails/(double)time;
}
/**
* Adds a new boolean trial to the detector, with the goal of detecting when
* the number of successful trials ({@code true}) drifts to a new value.
* This detector begins storing a history of the {@code obj} inputs only
* once it has entered a warning state. <br>
* This detector is specifically meant to detect drops in the success rate,
* and will not cause any warning or drift detections for increases in the
* success rate.
* @param trial the result of the trial
* @param obj the object to associate with the trial
* @return {@code true} if we are in a warning or drift state,
* {@code false } if we are not
*/
public boolean addSample(boolean trial, V obj)
{
if(drifting)
throw new UnhandledDriftException();
if(!trial)
fails++;
time++;
if(time < minSamples)
return false;
final double p_i = fails/(double)time;
final double s_i = Math.sqrt(p_i*(1-p_i)/time);
final double ps = p_i+s_i;
        //values are updated when p_i + s_i is lower than p_min + s_min
if(ps < p_min + s_min)
{
p_min = p_i;
s_min = s_i;
}
if (ps > p_min + warningThreshold * s_min)
{
if(!warning)//first entry
{
warning = true;
driftStart = time - 1;
}
addToHistory(obj);
if (ps > p_min + driftThreshold * s_min)
{
warning = false;
drifting = true;
}
return true;
}
else//everything is good
{
warning = false;
driftStart = -1;
clearHistory();
return false;
}
}
/**
* Sets the multiplier on the standard deviation that must be exceeded to
* initiate a warning state. Once in the warning state, DDM will begin to
* collect a history of the inputs <br>
* Increasing the warning threshold makes it take longer to start detecting
* a change, but reduces false positives. <br>
     * If the warning threshold is set above the
     * {@link #setDriftThreshold(double) } value, drift will not be detected
     * until the warning threshold is exceeded, at which point the detector
     * goes directly to the drift state, effectively skipping the warning state.
* @param warningThreshold the positive multiplier threshold for starting a
* warning state
*/
public void setWarningThreshold(double warningThreshold)
{
if(warningThreshold <= 0 || Double.isNaN(warningThreshold) || Double.isInfinite(warningThreshold))
throw new IllegalArgumentException("warning threshold must be positive, not " + warningThreshold);
this.warningThreshold = warningThreshold;
}
/**
* Returns the threshold multiple for controlling the false positive /
* negative rate on detecting changes.
* @return the threshold multiple for controlling warning detection
*/
public double getWarningThreshold()
{
return warningThreshold;
}
/**
* Sets the multiplier on the standard deviation that must be exceeded to
* recognize the change as a drift. <br>
* Increasing the drift threshold makes it take longer to start detecting
* a change, but reduces false positives.
* @param driftThreshold the positive multiplier threshold for detecting a
* drift
*/
public void setDriftThreshold(double driftThreshold)
{
if(driftThreshold <= 0 || Double.isNaN(driftThreshold) || Double.isInfinite(driftThreshold))
throw new IllegalArgumentException("Dritf threshold must be positive, not " + driftThreshold);
this.driftThreshold = driftThreshold;
}
/**
* Returns the threshold multiple for controlling the false positive /
* negative rate on detecting changes.
* @return the threshold for controlling drift detection
*/
public double getDriftThreshold()
{
return driftThreshold;
}
    @Override
    public boolean addSample(double value, V obj)
    {
        //the base class contract treats zero as false and any non-zero value as true
        return addSample(value != 0.0, obj);
    }
@Override
public void driftHandled()
{
super.driftHandled();
fails = 0;
p_min = s_min = Double.POSITIVE_INFINITY;
time = 0;
clearHistory();
}
@Override
public DDM<V> clone()
{
return new DDM<V>(this);
}
}
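
/**
 * Minimal usage sketch (illustrative only, not part of the original source):
 * feeds a stream of simulated classification outcomes into a DDM detector and
 * reacts when the accuracy drops. The simulated accuracy values, the random
 * seed, and the use of the trial index as the history object are assumptions
 * made for this example; isDrifting() is assumed to be the accessor documented
 * on BaseDriftDetector.
 */
class DDMUsageSketch
{
    public static void main(String[] args)
    {
        DDM<Integer> ddm = new DDM<>(2, 3);//default warning/drift thresholds
        java.util.Random rand = new java.util.Random(42);
        for (int i = 0; i < 10000; i++)
        {
            double accuracy = i < 5000 ? 0.90 : 0.60;//simulated concept drift half way through
            boolean correct = rand.nextDouble() < accuracy;
            if (ddm.addSample(correct, i) && ddm.isDrifting())
            {
                System.out.println("Drift detected at trial " + i
                        + ", started about " + ddm.getDriftAge() + " trials ago");
                java.util.List<Integer> affected = ddm.getDriftedHistory();//trials collected since the warning began
                System.out.println(affected.size() + " trials in the affected history");
                ddm.driftHandled();//must be called before adding more samples
            }
        }
    }
}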
| 6,837 | 30.511521 | 110 | java |
JSAT | JSAT-master/JSAT/src/jsat/driftdetectors/UnhandledDriftException.java | package jsat.driftdetectors;
/**
 * This exception is thrown when a drift detector receives new data even though
 * the drift was not handled.
*
* @author Edward Raff
*/
public class UnhandledDriftException extends RuntimeException
{
private static final long serialVersionUID = -5781626293819651067L;
public UnhandledDriftException()
{
super();
}
public UnhandledDriftException(String message)
{
super(message);
}
}
| 467 | 17.72 | 81 | java |
JSAT | JSAT-master/JSAT/src/jsat/exceptions/FailedToFitException.java |
package jsat.exceptions;
/**
*
* @author Edward Raff
*/
public class FailedToFitException extends RuntimeException
{
private static final long serialVersionUID = 2982189541225068993L;
private Exception faultException;
public FailedToFitException(Exception faultException, String message)
{
super(message);
this.faultException = faultException;
}
public FailedToFitException(Exception faultException, Throwable cause)
{
super(cause);
this.faultException = faultException;
}
public FailedToFitException(Exception faultException, String message, Throwable cause)
{
super(message, cause);
this.faultException = faultException;
}
public FailedToFitException(Exception faultException)
{
super(faultException.getMessage());
this.faultException = faultException;
}
public FailedToFitException(String string)
{
super(string);
}
/**
* Returns the exception that caused the issue. If no exception occurred
* that caused the failure to fit, the value returned will be null.
* @return the exception that caused the issue.
*/
public Exception getFaultException()
{
return faultException;
}
}
| 1,284 | 22.363636 | 90 | java |
JSAT | JSAT-master/JSAT/src/jsat/exceptions/ModelMismatchException.java |
package jsat.exceptions;
/**
* This exception is thrown when the input into a model does not match the expectation of the model.
* @author Edward Raff
*/
public class ModelMismatchException extends RuntimeException
{
private static final long serialVersionUID = 6962636868667470816L;
public ModelMismatchException(String message, Throwable cause)
{
super(message, cause);
}
public ModelMismatchException(Throwable cause)
{
super(cause);
}
public ModelMismatchException(String message)
{
super(message);
}
public ModelMismatchException()
{
super();
}
}
| 647 | 17.514286 | 101 | java |
JSAT | JSAT-master/JSAT/src/jsat/exceptions/UntrainedModelException.java |
package jsat.exceptions;
/**
* This exception is thrown when someone attempts to use a model that has not been trained or constructed.
* @author Edward Raff
*/
public class UntrainedModelException extends RuntimeException
{
private static final long serialVersionUID = 3693546100471013277L;
public UntrainedModelException(String message, Throwable cause)
{
super(message, cause);
}
public UntrainedModelException(Throwable cause)
{
super(cause);
}
public UntrainedModelException(String message)
{
super(message);
}
public UntrainedModelException()
{
super();
}
}
| 658 | 17.828571 | 107 | java |
JSAT | JSAT-master/JSAT/src/jsat/io/ARFFLoader.java |
package jsat.io;
import java.io.*;
import java.util.*;
import java.util.Map.Entry;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import jsat.DataSet;
import jsat.DataStore;
import jsat.SimpleDataSet;
import jsat.classifiers.*;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.regression.RegressionDataSet;
import jsat.utils.DoubleList;
/**
* Class for loading ARFF files. ARFF is a human readable file format used by
 * Weka. The ARFF file format allows for attributes that have missing
* information, which are encoded in JSAT as {@link Double#NaN} for numeric
* features, and a {@code -1} for categorical features.
*
* <br>
* <a href="http://www.cs.waikato.ac.nz/ml/weka/arff.html">About Weka</a>
* @author Edward Raff
*/
public class ARFFLoader
{
/**
* Uses the given file path to load a data set from an ARFF file.
*
* @param file the path to the ARFF file to load
* @return the data set from the ARFF file, or null if the file could not be loaded.
*/
public static SimpleDataSet loadArffFile(File file)
{
try
{
return loadArffFile(new FileReader(file));
}
catch (FileNotFoundException ex)
{
Logger.getLogger(ARFFLoader.class.getName()).log(Level.SEVERE, null, ex);
return null;
}
}
/**
* Uses the given reader to load a data set assuming it follows the ARFF
* file format
* @param input the reader to load the data set from
     * @return the data set from the stream, or null if the file could not be loaded
*/
public static SimpleDataSet loadArffFile(Reader input)
{
return loadArffFile(input, DataStore.DEFAULT_STORE);
}
/**
* Uses the given reader to load a data set assuming it follows the ARFF
* file format
* @param input the reader to load the data set from
* @param store the data store to use
     * @return the data set from the stream, or null if the file could not be loaded
*/
public static SimpleDataSet loadArffFile(Reader input, DataStore store)
{
DataStore list = store.emptyClone();
DoubleList weights = new DoubleList();
BufferedReader br = new BufferedReader(input);
int numOfVars = 0;
int numReal = 0;
List<Boolean> isReal = new ArrayList<>();
List<String> variableNames = new ArrayList<>();
List<HashMap<String, Integer>> catVals = new ArrayList<>();
String line = null;
CategoricalData[] categoricalData = null;
try
{
boolean atData = false;
while( (line = br.readLine()) != null )
{
if(line.startsWith("%") || line.trim().isEmpty())
                    continue;//It's a comment, skip it
line = line.trim();
if(line.startsWith("@") && !atData)
{
line = line.substring(1).toLowerCase();
if(line.toLowerCase().startsWith("data"))
{
categoricalData = new CategoricalData[numOfVars-numReal];
int k = 0;
for(int i = 0; i < catVals.size(); i++)
{
if(catVals.get(i) != null)
{
categoricalData[k] = new CategoricalData(catVals.get(i).size());
categoricalData[k].setCategoryName(variableNames.get(i));
for(Entry<String, Integer> entry : catVals.get(i).entrySet())
categoricalData[k].setOptionName(entry.getKey(), entry.getValue());
k++;
}
}
                        //prep to start reading in the data
list.setNumNumeric(numReal);
list.setCategoricalDataInfo(categoricalData);
atData = true;
continue;
}
else if(!line.toLowerCase().startsWith("attribute"))
continue;
numOfVars++;
line = line.substring("attribute".length()).trim();//Remove the space, it could be multiple spaces
String variableName = null;
line = line.replace("\t", " ");
if(line.startsWith("'"))
{
Pattern p = Pattern.compile("'.+?'");
Matcher m = p.matcher(line);
m.find();
variableName = nameTrim(m.group());
line = line.replaceFirst("'.+?'", "placeHolder");
}
else
variableName = nameTrim(line.trim().replaceAll("\\s+.*", ""));
variableNames.add(variableName);
String[] tmp = line.split("\\s+", 2);
if(tmp[1].trim().equals("real") || tmp[1].trim().equals("numeric") || tmp[1].trim().startsWith("integer"))
{
numReal++;
isReal.add(true);
catVals.add(null);
}
                    else//Not correct, but we aren't supporting anything other than real and categorical right now
{
isReal.add(false);
String cats = tmp[1].replace("{", "").replace("}", "").trim();
if(cats.endsWith(","))
cats = cats.substring(0, cats.length()-1);
String[] catValsRaw = cats.split(",");
HashMap<String, Integer> tempMap = new HashMap<String, Integer>();
for(int i = 0; i < catValsRaw.length; i++)
{
catValsRaw[i] = nameTrim(catValsRaw[i]);
tempMap.put(catValsRaw[i], i);
}
catVals.add(tempMap);
}
}
else if(atData && !line.isEmpty())
{
double weight = 1.0;
String[] tmp = line.split(",");
                    if(tmp.length != isReal.size())
                    {
                        if(tmp.length == isReal.size()+1)//{#} means the # is the weight
                        {
                            String s = tmp[isReal.size()];
                            if(!s.matches("\\{\\d+(\\.\\d+)?\\}"))
                                throw new RuntimeException("extra column must indicate a data point weight in the form of \"{#}\", instead bad token " + s + " was found");
                            weight = Double.parseDouble(s.substring(1, s.length()-1));
                        }
                        else
                        {
                            throw new RuntimeException("Row had " + tmp.length + " values instead of " + isReal.size());
                        }
                    }
DenseVector vec = new DenseVector(numReal);
int[] cats = new int[numOfVars - numReal];
int k = 0;//Keeping track of position in cats
for(int i = 0; i < isReal.size(); i++)
{
String val_string = tmp[i].trim();
if (isReal.get(i))
if (val_string.equals("?"))//missing value, indicated by NaN
vec.set(i - k, Double.NaN);
else
vec.set(i - k, Double.parseDouble(val_string));
else//Categorical
{
tmp[i] = nameTrim(tmp[i]).trim().toLowerCase();
if(tmp[i].equals("?"))//missing value, indicated by -1
cats[k++] = -1;
else
cats[k++] = catVals.get(i).get(tmp[i]);
}
}
list.addDataPoint(new DataPoint(vec, cats, categoricalData));
weights.add(weight);
}
}
}
        catch (IOException ex)
        {
            Logger.getLogger(ARFFLoader.class.getName()).log(Level.SEVERE, null, ex);
        }
SimpleDataSet dataSet = new SimpleDataSet(list);
for(int i = 0; i < weights.size(); i++)
dataSet.setWeight(i, weights.getD(i));
int k = 0;
for (int i = 0; i < isReal.size(); i++)
if (isReal.get(i))
dataSet.setNumericName(variableNames.get(i), k++);
return dataSet;
}
public static void writeArffFile(DataSet data, OutputStream os) {
writeArffFile(data, os, "Default_Relation");
}
/**
* Writes out the dataset as an ARFF file to the given stream. This method
* will automatically handle the target variable of
* {@link ClassificationDataSet} and {@link RegressionDataSet}.
*
* @param data the dataset to write out
* @param os the output stream to write too
* @param relation the relation label to write out
*/
public static void writeArffFile(DataSet data, OutputStream os, String relation)
{
PrintWriter writer = new PrintWriter(os);
//write out the relation tag
writer.write(String.format("@relation %s\n", addQuotes(relation)));
//write out attributes
//first all categorical features
CategoricalData[] catInfo = data.getCategories();
for( CategoricalData cate : catInfo)
{
writeCatVar(writer, cate);
}
//write out all numeric features
for(int i = 0; i < data.getNumNumericalVars(); i++)
{
String name = data.getNumericName(i);
writer.write("@attribute " + (name == null ? "num" + i : name.replaceAll("\\s+", "-")) + " NUMERIC\n");
}
if(data instanceof ClassificationDataSet)//also write out class variable
writeCatVar(writer, ((ClassificationDataSet)data).getPredicting());
if(data instanceof RegressionDataSet)
writer.write("@ATTRIBUTE target NUMERIC\n");
writer.write("@DATA\n");
for(int row = 0; row < data.size(); row++)
{
DataPoint dp = data.getDataPoint(row);
boolean firstFeature = true;
//cat vars first
for(int i = 0; i < catInfo.length; i++)
{
if(!firstFeature)
writer.write(",");
firstFeature = false;
int cat_val = dp.getCategoricalValue(i);
if(cat_val < 0)
writer.write("?");
else
writer.write(addQuotes(catInfo[i].getOptionName(cat_val)));
}
//numeric vars
Vec v = dp.getNumericalValues();
for(int i = 0; i < v.length(); i++)
{
if(!firstFeature)
writer.write(",");
firstFeature = false;
double val = v.get(i);
if(Double.isNaN(val))//missing value case
writer.write("?");
                else if(Math.rint(val) == val)//cast to long before writing to save space
writer.write(Long.toString((long) val));
else
writer.write(Double.toString(val));
}
if (data instanceof ClassificationDataSet)//also write out class variable
{
if(!firstFeature)
writer.write(",");
firstFeature = false;
ClassificationDataSet cdata = (ClassificationDataSet) data;
writer.write(addQuotes(cdata.getPredicting().getOptionName(cdata.getDataPointCategory(row))));
}
if (data instanceof RegressionDataSet)
{
if(!firstFeature)
writer.write(",");
firstFeature = false;
writer.write(Double.toString(((RegressionDataSet)data).getTargetValue(row)));
}
writer.write("\n");
}
writer.flush();
}
private static String addQuotes(String string)
{
if(string.contains(" "))
return "\"" + string + "\"";
else
return string;
}
private static void writeCatVar(PrintWriter writer, CategoricalData cate)
{
writer.write("@ATTRIBUTE " + cate.getCategoryName().replaceAll("\\s+", "-") + " {" );
for(int i = 0; i < cate.getNumOfCategories(); i++)
{
if(i != 0)
writer.write(",");
writer.write(addQuotes(cate.getOptionName(i)));
}
writer.write("}\n");
}
/**
* Removes the quotes at the end and front of a string if there are any, as well as spaces at the front and end
* @param in the string to trim
* @return the white space and quote trimmed string
*/
private static String nameTrim(String in)
{
in = in.trim();
if(in.startsWith("'") || in.startsWith("\""))
in = in.substring(1);
if(in.endsWith("'") || in.startsWith("\""))
in = in.substring(0, in.length()-1);
return in.trim();
}
}
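
/**
 * Minimal usage sketch (illustrative only, not part of the original source):
 * loads an ARFF file, prints its shape, and writes a copy back out. The file
 * names and the relation label are assumptions made for this example.
 */
class ARFFLoaderUsageSketch
{
    public static void main(String[] args) throws IOException
    {
        SimpleDataSet data = ARFFLoader.loadArffFile(new File("iris.arff"));
        System.out.println(data.size() + " rows, "
                + data.getNumNumericalVars() + " numeric and "
                + data.getCategories().length + " categorical features");
        try (OutputStream out = new FileOutputStream("iris-copy.arff"))
        {
            ARFFLoader.writeArffFile(data, out, "iris");
        }
    }
}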
| 13,988 | 38.854701 | 170 | java |
JSAT | JSAT-master/JSAT/src/jsat/io/CSV.java | /*
* Copyright (C) 2015 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.io;
import java.io.*;
import java.util.*;
import jsat.DataSet;
import jsat.classifiers.*;
import jsat.linear.Vec;
import jsat.regression.RegressionDataSet;
import jsat.utils.DoubleList;
import jsat.utils.StringUtils;
import static java.lang.Character.isWhitespace;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import jsat.DataStore;
import jsat.SimpleDataSet;
import jsat.linear.*;
import jsat.utils.*;
/**
* Provides a reader and writer for CSV style datasets. This CSV reader supports
* comments in CSVs (must begin with a single character) and categorical
* features (columns must be specified when calling). Any number of newlines
* will be treated as a single newline separating two rows.<br>
* <br>
* When reading and writing a CSV, if the delimiter or comment markers are not
* specified - the defaults will be used {@link #DEFAULT_DELIMITER} and
* {@link #DEFAULT_COMMENT} respectively.<br>
* <br>
* The CSV loader will treat empty columns as missing values for both numeric
* and categorical features. A value of "NaN" in a numeric column will also be
* treated as a missing value. Once loaded, missing values for numeric features
* are encoded as {@link Double#NaN} and as <i>-1</i> for categorical features.
*
* @author Edward Raff
*/
public class CSV
{
public static final char DEFAULT_DELIMITER = ',';
public static final char DEFAULT_COMMENT = '#';
private CSV()
{
}
/**
* Reads in a CSV dataset as a regression dataset.
*
* @param numeric_target_column the column index (starting from zero) of the
* feature that will be the target regression value
     * @param path the CSV file to read
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param cat_cols a set of the indices to treat as categorical features.
* @return the regression dataset from the given CSV file
* @throws IOException
*/
public static RegressionDataSet readR(int numeric_target_column, Path path, int lines_to_skip, Set<Integer> cat_cols) throws IOException
{
return readR(numeric_target_column, path, DEFAULT_DELIMITER, lines_to_skip, DEFAULT_COMMENT, cat_cols);
}
/**
* Reads in a CSV dataset as a regression dataset.
*
* @param numeric_target_column the column index (starting from zero) of the
* feature that will be the target regression value
* @param reader the reader for the CSV content
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param cat_cols a set of the indices to treat as categorical features.
* @return the regression dataset from the given CSV file
* @throws IOException
*/
public static RegressionDataSet readR(int numeric_target_column, Reader reader, int lines_to_skip, Set<Integer> cat_cols) throws IOException
{
return readR(numeric_target_column, reader, DEFAULT_DELIMITER, lines_to_skip, DEFAULT_COMMENT, cat_cols);
}
/**
* Reads in a CSV dataset as a regression dataset.
*
* @param numeric_target_column the column index (starting from zero) of the
* feature that will be the target regression value
* @param path the CSV file to read
* @param delimiter the delimiter to separate columns, usually a comma
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param comment the character used to indicate the start of a comment.
* Once this character is reached, anything at and after the character will
* be ignored.
* @param cat_cols a set of the indices to treat as categorical features.
* @return the regression dataset from the given CSV file
* @throws IOException
*/
public static RegressionDataSet readR(int numeric_target_column, Path path, char delimiter, int lines_to_skip, char comment, Set<Integer> cat_cols) throws IOException
{
BufferedReader br = Files.newBufferedReader(path, Charset.defaultCharset());
RegressionDataSet ret = readR(numeric_target_column, br, delimiter, lines_to_skip, comment, cat_cols);
br.close();
return ret;
}
/**
* Reads in a CSV dataset as a regression dataset.
*
* @param numeric_target_column the column index (starting from zero) of the
* feature that will be the target regression value
* @param reader the reader for the CSV content
* @param delimiter the delimiter to separate columns, usually a comma
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param comment the character used to indicate the start of a comment.
* Once this character is reached, anything at and after the character will
* be ignored.
* @param cat_cols a set of the indices to treat as categorical features.
* @return the regression dataset from the given CSV file
* @throws IOException
*/
public static RegressionDataSet readR(int numeric_target_column, Reader reader, char delimiter, int lines_to_skip, char comment, Set<Integer> cat_cols) throws IOException
{
return (RegressionDataSet) readCSV(reader, lines_to_skip, delimiter, comment, cat_cols, numeric_target_column, -1);
}
/**
* Reads in a CSV dataset as a classification dataset. Comments assumed to
* start with the "#" symbol.
*
* @param classification_target the column index (starting from zero) of the
* feature that will be the categorical target value
* @param path the CSV file to read
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param cat_cols a set of the indices to treat as categorical features.
* @return the classification dataset from the given CSV file
* @throws IOException
*/
public static ClassificationDataSet readC(int classification_target, Path path, int lines_to_skip, Set<Integer> cat_cols) throws IOException
{
return readC(classification_target, path, DEFAULT_DELIMITER, lines_to_skip, DEFAULT_COMMENT, cat_cols);
}
/**
* Reads in a CSV dataset as a classification dataset. Comments assumed to
* start with the "#" symbol.
*
* @param classification_target the column index (starting from zero) of the
* feature that will be the categorical target value
* @param reader the reader for the CSV content
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param cat_cols a set of the indices to treat as categorical features.
* @return the classification dataset from the given CSV file
* @throws IOException
*/
public static ClassificationDataSet readC(int classification_target, Reader reader, int lines_to_skip, Set<Integer> cat_cols) throws IOException
{
return readC(classification_target, reader, DEFAULT_DELIMITER, lines_to_skip, DEFAULT_COMMENT, cat_cols);
}
/**
* Reads in a CSV dataset as a classification dataset.
*
* @param classification_target the column index (starting from zero) of the
* feature that will be the categorical target value
* @param reader the reader for the CSV content
* @param delimiter the delimiter to separate columns, usually a comma
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param comment the character used to indicate the start of a comment.
* Once this character is reached, anything at and after the character will
* be ignored.
* @param cat_cols a set of the indices to treat as categorical features.
* @return the classification dataset from the given CSV file
* @throws IOException
*/
public static ClassificationDataSet readC(int classification_target, Reader reader, char delimiter, int lines_to_skip, char comment, Set<Integer> cat_cols) throws IOException
{
return (ClassificationDataSet) readCSV(reader, lines_to_skip, delimiter, comment, cat_cols, -1, classification_target);
}
/**
* Reads in a CSV dataset as a classification dataset.
*
* @param classification_target the column index (starting from zero) of the
* feature that will be the categorical target value
* @param path the CSV file
* @param delimiter the delimiter to separate columns, usually a comma
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param comment the character used to indicate the start of a comment.
* Once this character is reached, anything at and after the character will
* be ignored.
* @param cat_cols a set of the indices to treat as categorical features.
* @return the classification dataset from the given CSV file
* @throws IOException
*/
public static ClassificationDataSet readC(int classification_target, Path path, char delimiter, int lines_to_skip, char comment, Set<Integer> cat_cols) throws IOException
{
BufferedReader br = Files.newBufferedReader(path, Charset.defaultCharset());
ClassificationDataSet ret = readC(classification_target, br, delimiter, lines_to_skip, comment, cat_cols);
br.close();
return ret;
}
/**
* Reads in the given CSV dataset as a simple CSV file
* @param path the CSV file
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param cat_cols a set of the indices to treat as categorical features.
* @return a simple dataset of the given CSV file
* @throws IOException
*/
public static SimpleDataSet read(Path path, int lines_to_skip, Set<Integer> cat_cols) throws IOException
{
return read(path, DEFAULT_DELIMITER, lines_to_skip, DEFAULT_COMMENT, cat_cols);
}
/**
* Reads in the given CSV dataset as a simple CSV file
* @param reader the reader for the CSV content
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param cat_cols a set of the indices to treat as categorical features.
* @return a simple dataset of the given CSV file
* @throws IOException
*/
public static SimpleDataSet read(Reader reader, int lines_to_skip, Set<Integer> cat_cols) throws IOException
{
return read(reader, DEFAULT_DELIMITER, lines_to_skip, DEFAULT_COMMENT, cat_cols);
}
/**
* Reads in the given CSV dataset as a simple CSV file
* @param path the CSV file to read
* @param delimiter the delimiter to separate columns, usually a comma
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param comment the character used to indicate the start of a comment.
* Once this character is reached, anything at and after the character will
* be ignored.
* @param cat_cols a set of the indices to treat as categorical features.
* @return a simple dataset of the given CSV file
* @throws IOException
*/
public static SimpleDataSet read(Path path, char delimiter, int lines_to_skip, char comment, Set<Integer> cat_cols) throws IOException
{
BufferedReader br = Files.newBufferedReader(path, Charset.defaultCharset());
SimpleDataSet ret = read(br, delimiter, lines_to_skip, comment, cat_cols);
br.close();
return ret;
}
/**
* Reads in the given CSV dataset as a simple CSV file
* @param reader the reader for the CSV content
* @param delimiter the delimiter to separate columns, usually a comma
* @param lines_to_skip the number of lines to skip when reading in the CSV
* (used to skip header information)
* @param comment the character used to indicate the start of a comment.
* Once this character is reached, anything at and after the character will
* be ignored.
* @param cat_cols a set of the indices to treat as categorical features.
* @return a simple dataset of the given CSV file
* @throws IOException
*/
public static SimpleDataSet read(Reader reader, char delimiter, int lines_to_skip, char comment, Set<Integer> cat_cols) throws IOException
{
return (SimpleDataSet) readCSV(reader, lines_to_skip, delimiter, comment, cat_cols, -1, -1);
}
private static DataSet<?> readCSV(Reader reader, int lines_to_skip, char delimiter, char comment, Set<Integer> cat_col, int numeric_target, int cat_target) throws IOException
{
StringBuilder processBuffer = new StringBuilder(20);
StringBuilder charBuffer = new StringBuilder(1024);
char[] read_buffer = new char[1024];
/**
* The target values if doing regression
*/
DoubleList regressionTargets = new DoubleList();
/**
* The target values if doing classification
*/
IntList catTargets = new IntList();
/**
         * First mapping is for each column that contains categorical variables.
* The value map is a mapping from each string to its index, based on order seen.
*/
Map<Integer, Map<String, Integer>> seenCats = new HashMap<>();
for(int col : cat_col)
if(col != cat_target)
seenCats.put(col, new HashMap<>());
/**
* a mapping from each string to its index, based on order seen, for the target class
*/
Map<String, Integer> seenCats_target = new HashMap<>();
        /**
         * Maps the index of each categorical feature to the CSV column it was read from
         */
Map<Integer, Integer> cat_indx_to_csv_column = new HashMap<>();
STATE state = STATE.INITIAL;
int position = 0;
/**
* Negative value used to indicate that we don't know how many columns
* there are yet. Once we process a single row, we set the number of
* columns seen so we can sanity check
*/
int totalCols = -1;
DoubleList numericFeats = new DoubleList();
IntList catFeats = new IntList();
int cur_column = 0;
List<Vec> all_vecs = new ArrayList<>();
List<int[]> all_cats = new ArrayList<>();
while(true)
{
while(charBuffer.length()-position <= 1)//make sure we have chars to handle
{
//move everything to the front
charBuffer.delete(0, position);
position = 0;
int read = reader.read(read_buffer);
if(read < 0)
break;
charBuffer.append(read_buffer, 0, read);
}
if(charBuffer.length()-position == 0)//EOF, no more chars
{
//Look at the last state we were in before EOF
if(state == STATE.NEWLINE)
{
//nothing to do and everything already processed, just return
break;
}
else if(state == STATE.COMMENT)
{
break;///nothing to do, values should have already been added once we transition to comment state
}
else if(state == STATE.VALUE)//line ended in the middle of processing
{
charBuffer.append("\n");//append the wanted newline and let it run thought like normal
}
else
throw new RuntimeException();
}
//Normal processing of states
char ch = charBuffer.charAt(position);
switch(state)
{
case INITIAL:
if(lines_to_skip > 0)
state = STATE.SKIPPING_ROWS;
else
state = STATE.VALUE;
break;
case COMMENT://comment behaves basically the same as SKIPPING ROWS
case SKIPPING_ROWS:
if(isNewLine(ch))
{
if(state == STATE.SKIPPING_ROWS)
lines_to_skip--;
state = STATE.NEWLINE;
}
else
{
//keep moving till we hit a new line
position++;
}
break;
case VALUE:
if(ch == delimiter || isNewLine(ch) || ch == comment )
{
//trim all the white space from the end of what we have been reading
while(processBuffer.length() > 0 && isWhitespace(processBuffer.charAt(processBuffer.length()-1)))
processBuffer.setLength(processBuffer.length()-1);
//clean up the value we are looking at
if(cat_col.contains(cur_column) || cur_column == cat_target)
{
Map<String, Integer> map = (cur_column == cat_target) ? seenCats_target : seenCats.get(cur_column);
String cat_op = processBuffer.toString();
processBuffer.setLength(0);
int val;
if(cat_op.length() == 0)
val = -1;
else
{
if(!map.containsKey(cat_op))
map.put(cat_op, map.size());
val = map.get(cat_op);
}
if (cur_column == cat_target)
if (val == -1)
throw new RuntimeException("Categorical column can't have missing values!");
else
catTargets.add(val);
else
catFeats.add(val);
if(cur_column != cat_target)
cat_indx_to_csv_column.put(catFeats.size()-1, cur_column);
}
else//numeric feature
{
double val;
if(processBuffer.length() == 0)
val = Double.NaN;
else
val = StringUtils.parseDouble(processBuffer, 0, processBuffer.length());
processBuffer.setLength(0);
if(cur_column == numeric_target)
{
regressionTargets.add(val);
}
else//normal storage
{
numericFeats.add(val);
}
}
//now do the state transitions
if(ch == delimiter)
state = STATE.DELIMITER;
else
{
if(ch == comment)
state = STATE.COMMENT;
else
state = STATE.NEWLINE;
if(totalCols < 0)
totalCols = cur_column+1;
else if(totalCols != cur_column+1)
throw new RuntimeException("Inconsistent number of columns in CSV");
                        //add our stuff to the list
all_vecs.add(new DenseVector(numericFeats));
int[] cat_vals = new int[catFeats.size()];
for(int i = 0; i <cat_vals.length; i++)
cat_vals[i] = catFeats.getI(i);
all_cats.add(cat_vals);
numericFeats.clear();
catFeats.clear();
}
}
else//process a character value
{
if(processBuffer.length() == 0 && Character.isWhitespace(ch))
{
//don't add leading whitespace to the buffer, just move to next char
position++;
}
else//normal value, add to buffer and increment to next char
{
processBuffer.append(ch);
position++;
}
}
break;
case DELIMITER:
if(ch == delimiter)
{
position++;
cur_column++;
state = STATE.VALUE;
}
else
throw new RuntimeException("BAD CSV");//how did we get here?
break;
case NEWLINE:
cur_column = 0;
if (isNewLine(ch))
position++;
else//now we move to next state
{
if (lines_to_skip > 0)
{
//keep skipping until we are out of lines to skip
state = STATE.SKIPPING_ROWS;
}
else
{
state = STATE.VALUE;
}
}
break;
}
}
//ok, we read everything in - clean up time on the categorical features
/**
* we will sort each set of seen options so that we get the same feature
* index ordering regardless of the order they occurred in the data
*/
Map<Integer, Map<Integer, Integer>> cat_true_index = new HashMap<>();
Map<Integer, CategoricalData> catDataMap = new HashMap<>();
if(cat_target >= 0)//added so it gets processed easily below
seenCats.put(cat_target, seenCats_target);
CategoricalData target_data = null;
for( Map.Entry<Integer, Map<String, Integer>> main_entry : seenCats.entrySet())
{
HashMap<Integer, Integer> translator = new HashMap<>();
int col = main_entry.getKey();
Map<String, Integer> catsSeen = main_entry.getValue();
List<String> sortedOrder = new ArrayList<>(catsSeen.keySet());
Collections.sort(sortedOrder);
CategoricalData cd = new CategoricalData(sortedOrder.size());
if(col != cat_target)
catDataMap.put(col, cd);
else
target_data = cd;
for(int i = 0; i < sortedOrder.size(); i++)
{
translator.put(catsSeen.get(sortedOrder.get(i)), i);
cd.setOptionName(sortedOrder.get(i), i);
}
cat_true_index.put(col, translator);
}
//go through and convert everything
for(int[] cat_vals : all_cats)
{
for(int i = 0; i < cat_vals.length; i++)
{
if(cat_vals[i] >= 0)//if -1 its a missing value
cat_vals[i] = cat_true_index.get(cat_indx_to_csv_column.get(i)).get(cat_vals[i]);
}
}
if(cat_target >= 0)//clean up the target value as well
{
Map<Integer, Integer> translator = cat_true_index.get(cat_target);
for(int i = 0; i < catTargets.size(); i++)
catTargets.set(i, translator.get(catTargets.get(i)));
}
//collect the categorical variable headers
CategoricalData[] cat_array = new CategoricalData[catDataMap.size()];
for(int i = 0; i < cat_array.length; i++)
cat_array[i]= catDataMap.get(cat_indx_to_csv_column.get(i));
if(cat_target >= 0)
{
ClassificationDataSet d = new ClassificationDataSet(totalCols - cat_array.length-1, cat_array, target_data);
d.setDataStore(DataStore.DEFAULT_STORE.emptyClone());
for (int i = 0; i < all_vecs.size(); i++)
d.addDataPoint(all_vecs.get(i), all_cats.get(i), catTargets.getI(i));
return d;
}
else if (numeric_target >= 0)
{
RegressionDataSet d = new RegressionDataSet(totalCols - cat_array.length - 1, cat_array);
d.setDataStore(DataStore.DEFAULT_STORE.emptyClone());
for (int i = 0; i < all_vecs.size(); i++)
d.addDataPoint(all_vecs.get(i), all_cats.get(i), regressionTargets.getD(i));
return d;
}
else
{
SimpleDataSet d = new SimpleDataSet(totalCols - cat_array.length, cat_array);
d.setDataStore(DataStore.DEFAULT_STORE.emptyClone());
for (int i = 0; i < all_vecs.size(); i++)
d.add(new DataPoint(all_vecs.get(i), all_cats.get(i), cat_array));
return d;
}
}
/**
* Writes out the given dataset as a CSV file. If the given dataset is a
* regression or classification dataset, the target feature that is being
* predicted will always be written out as the first index in the CSV. <br>
* After that, all numeric features will be written out in order, followed
* by the categorical features.
*
* @param data the dataset object to save as a CSV file
* @param path the path to write the CSV to
* @throws IOException
*/
public static void write(DataSet<?> data, Path path) throws IOException
{
write(data, path, DEFAULT_DELIMITER);
}
/**
* Writes out the given dataset as a CSV file. If the given dataset is a
* regression or classification dataset, the target feature that is being
* predicted will always be written out as the first index in the CSV. <br>
* After that, all numeric features will be written out in order, followed
* by the categorical features.
*
* @param data the dataset object to save as a CSV file
* @param writer the output writer to write the CSV to
* @throws IOException
*/
public static void write(DataSet<?> data, Writer writer) throws IOException
{
write(data, writer, DEFAULT_DELIMITER);
}
/**
* Writes out the given dataset as a CSV file. If the given dataset is a
* regression or classification dataset, the target feature that is being
* predicted will always be written out as the first index in the CSV. <br>
* After that, all numeric features will be written out in order, followed
* by the categorical features.
*
* @param data the dataset object to save as a CSV file
* @param path the path to write the CSV to
* @param delimiter the delimiter between column values, normally a comma
* @throws IOException
*/
public static void write(DataSet<?> data, Path path, char delimiter) throws IOException
{
BufferedWriter bw = Files.newBufferedWriter(path, Charset.defaultCharset());
write(data, bw, delimiter);
bw.close();
}
/**
* Writes out the given dataset as a CSV file. If the given dataset is a
* regression or classification dataset, the target feature that is being
* predicted will always be written out as the first index in the CSV. <br>
* After that, all numeric features will be written out in order, followed
* by the categorical features.
*
* @param data the dataset object to save as a CSV file
* @param writer the output writer to write the CSV to
* @param delimiter the delimiter between column values, normally a comma
* @throws IOException
*/
public static void write(DataSet<?> data, Writer writer, char delimiter) throws IOException
{
//first, create safe categorical feature names to write out
String[][] catNamesToUse = getSafeNames(data.getCategories(), delimiter);
String[] classNames = null;
if(data instanceof ClassificationDataSet)
classNames = getSafeNames(new CategoricalData[]{((ClassificationDataSet)data).getPredicting()}, delimiter)[0];
//write out every data point
for(int i = 0; i < data.size(); i++)
{
if(i > 0)//write newline first
writer.write('\n');
boolean nothingWrittenYet = true;
//target feature always goes at the front
if(data instanceof ClassificationDataSet)
{
int targetClass = ((ClassificationDataSet)data).getDataPointCategory(i);
writer.write(classNames[targetClass]);
nothingWrittenYet = false;
}
else if(data instanceof RegressionDataSet)
{
double targetVal = ((RegressionDataSet)data).getTargetValue(i);
writer.write(Double.toString(targetVal));
nothingWrittenYet = false;
}
DataPoint dp = data.getDataPoint(i);
Vec v =dp.getNumericalValues();
int[] c = dp.getCategoricalValues();
//write out numeric features first
for(int j = 0; j < v.length(); j++)
{
if(!nothingWrittenYet)
writer.write(delimiter);
                //below handles NaN correctly, rint will just return NaN and then toString prints "NaN"
                double val = v.get(j);
                if(Math.rint(val) == val)//cast to long before writing to save space
writer.write(Long.toString((long) val));
else
writer.write(Double.toString(val));
nothingWrittenYet = false;
}
            //then categorical features, using the safe names we constructed earlier
for(int j = 0; j < c.length; j++)
{
if(!nothingWrittenYet)
writer.write(delimiter);
if(c[j] >= 0)
writer.write(catNamesToUse[j][c[j]]);
                //else, it's negative - which is missing, so not writing anything out should result in the correct behavior
nothingWrittenYet = false;
}
}
writer.flush();
}
/**
* Returns a DataWriter object which can be used to stream a set of
* arbitrary datapoints into the given output stream. This works in a thread
* safe manner. Uses the default delimiter {@link #DEFAULT_DELIMITER}
*
* @param out the location to store all the data
* @param catInfo information about the categorical features to be written
* @param dim information on how many numeric features exist
* @param predicting information on the class label, may be {@code null} if not a classification dataset
* @param type what type of data set (simple, classification, regression) to be written
* @return the DataWriter that the actual points can be streamed through
* @throws IOException
*/
static public DataWriter getWriter(OutputStream out, CategoricalData[] catInfo, int dim, CategoricalData predicting, DataWriter.DataSetType type) throws IOException
{
return getWriter(out, catInfo, dim, predicting, type, DEFAULT_DELIMITER);
}
/**
* Returns a DataWriter object which can be used to stream a set of
* arbitrary datapoints into the given output stream. This works in a thread
* safe manner.
*
* @param out the location to store all the data
* @param catInfo information about the categorical features to be written
* @param dim information on how many numeric features exist
* @param predicting information on the class label, may be {@code null} if not a classification dataset
* @param type what type of data set (simple, classification, regression) to be written
* @param delimiter the character delimiter between features
* @return the DataWriter that the actual points can be streamed through
* @throws IOException
*/
static public DataWriter getWriter(OutputStream out, CategoricalData[] catInfo, int dim, CategoricalData predicting, DataWriter.DataSetType type, final char delimiter) throws IOException
{
//first, create safe categorical feature names to write out
final String[][] catNamesToUse = getSafeNames(catInfo, delimiter);
final String[] classNames;
if(DataWriter.DataSetType.CLASSIFICATION == type)
{
if(predicting == null)
throw new RuntimeException("Can't create CSV writer without prediction target information (was null) ");
classNames = getSafeNames(new CategoricalData[]{predicting}, delimiter)[0];
}
else
classNames = null;
DataWriter dw = new DataWriter(out, catInfo, dim, type)
{
@Override
protected void writeHeader(CategoricalData[] catInfo, int dim, DataWriter.DataSetType type, OutputStream out)
{
//CSV format has no header!
}
@Override
protected void pointToBytes(double weight, DataPoint dp, double label, ByteArrayOutputStream byteOut)
{
PrintWriter writer = new PrintWriter(byteOut);
boolean nothingWrittenYet = true;
//target feature always goes at the front
if(type == DataWriter.DataSetType.CLASSIFICATION)
{
int targetClass = (int) label;
writer.write(classNames[targetClass]);
nothingWrittenYet = false;
}
else if(type == DataWriter.DataSetType.REGRESSION)
{
double targetVal = label;
writer.write(Double.toString(targetVal));
nothingWrittenYet = false;
}
Vec v =dp.getNumericalValues();
int[] c = dp.getCategoricalValues();
//write out numeric features first
for(int j = 0; j < v.length(); j++)
{
if(!nothingWrittenYet)
writer.write(delimiter);
                    //below handles NaN correctly, rint will just return NaN and then toString prints "NaN"
                    double val = v.get(j);
                    if(Math.rint(val) == val)//cast to long before writing to save space
writer.write(Long.toString((long) val));
else
writer.write(Double.toString(val));
nothingWrittenYet = false;
}
                //then categorical features, using the safe names we constructed earlier
for(int j = 0; j < c.length; j++)
{
if(!nothingWrittenYet)
writer.write(delimiter);
if(c[j] >= 0)
writer.write(catNamesToUse[j][c[j]]);
                        //else, it's negative - which is missing, so not writing anything out should result in the correct behavior
nothingWrittenYet = false;
}
writer.write("\n");
writer.flush();
}
};
return dw;
}
private static String[][] getSafeNames(CategoricalData[] cats, char delimiter)
{
String[][] catNamesToUse = new String[cats.length][];
final char delim_replacement;
        if(delimiter == '_')//avoid setting the replacement to the delimiter value itself!
delim_replacement = '-';
else
delim_replacement = '_';
for(int i = 0; i < catNamesToUse.length; i++)
{
catNamesToUse[i] = new String[cats[i].getNumOfCategories()];
for(int j = 0; j < catNamesToUse[i].length; j++)
{
String name = cats[i].getOptionName(j).trim();
if(name.contains(String.valueOf(delimiter)))
name = name.replace(delimiter, delim_replacement);
catNamesToUse[i][j] = name;
}
}
return catNamesToUse;
}
private static boolean isNewLine(char ch)
{
return ch =='\n' || ch == '\r';
}
/**
* Simple state machine used to parse CSV files
*/
private enum STATE
{
/**
* Initial state, doesn't actually do anything
*/
INITIAL,
/**
* Used when we start and want to skip some fixed number of rows in the file
*/
SKIPPING_ROWS,
VALUE,
DELIMITER,
NEWLINE,
/**
* When we encounter the comment start character, run till we hit the end of the line
*/
COMMENT,
}
}
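
/**
 * Minimal usage sketch (illustrative only, not part of the original source):
 * reads a classification CSV and writes it back out. The file names, the
 * target column index, the header line count, and the categorical column
 * indices are assumptions made for this example.
 */
class CSVUsageSketch
{
    public static void main(String[] args) throws IOException
    {
        Set<Integer> catCols = new HashSet<>(Arrays.asList(1, 3, 5));//columns holding categorical features
        ClassificationDataSet data = CSV.readC(0, java.nio.file.Paths.get("train.csv"), 1, catCols);//label in column 0, skip 1 header line
        System.out.println(data.size() + " rows, "
                + data.getPredicting().getNumOfCategories() + " classes");
        CSV.write(data, java.nio.file.Paths.get("train-copy.csv"), ',');
    }
}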
| 38,962 | 42.292222 | 190 | java |
JSAT | JSAT-master/JSAT/src/jsat/io/DataWriter.java | /*
* Copyright (C) 2017 edraff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.io;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
/**
 * This abstract class defines a contract by which data points may be written out to a
* dataset file (such as a CSV) in an incremental fashion. This object is thread
* safe for all methods.
*
* @author Edward Raff
*/
public abstract class DataWriter implements Closeable
{
/**
* Use a 10MB local buffer for each thread
*/
private static final int LOCAL_BUFFER_SIZE = 1024*1024*10;
/**
* The list of all local buffers, used to make sure all data makes it out when {@link #finish() } is called.
*/
protected List<ByteArrayOutputStream> all_buffers = Collections.synchronizedList(new ArrayList<>());
/**
* The destination to ultimately write the dataset to
*/
protected final OutputStream out;
/**
* The type of dataset to be written out
*/
public final DataSetType type;
/**
* the categorical feature information for the whole corpus
*/
public final CategoricalData[] catInfo;
/**
* the number of numeric features for the whole corpus
*/
public final int dim;
public DataWriter(OutputStream out, CategoricalData[] catInfo, int dim, DataSetType type) throws IOException
{
this.out = out;
this.type = type;
this.catInfo = catInfo;
this.dim = dim;
writeHeader(catInfo, dim, type, out);
}
/**
* The local buffers that writing will be done to.
*/
protected ThreadLocal<ByteArrayOutputStream> local_baos = new ThreadLocal<ByteArrayOutputStream>()
{
@Override
protected ByteArrayOutputStream initialValue()
{
ByteArrayOutputStream baos = new ByteArrayOutputStream(LOCAL_BUFFER_SIZE);
all_buffers.add(baos);
return baos;
}
};
abstract protected void writeHeader(CategoricalData[] catInfo, int dim, DataSetType type, OutputStream out) throws IOException;
/**
* Write out the given data point to the output stream
* @param dp the data point to write to the file
     * @param label The associated label for this datum. If {@link #type} is a
* {@link DataSetType#SIMPLE} set, this value will be ignored. If
* {@link DataSetType#CLASSIFICATION}, the value will be assumed to be an
* integer class label.
* @throws java.io.IOException
*/
public void writePoint(DataPoint dp, double label) throws IOException
{
writePoint(1.0, dp, label);
}
/**
* Write out the given data point to the output stream
* @param weight weight of the given data point to write out
* @param dp the data point to write to the file
     * @param label The associated label for this datum. If {@link #type} is a
* {@link DataSetType#SIMPLE} set, this value will be ignored. If
* {@link DataSetType#CLASSIFICATION}, the value will be assumed to be an
* integer class label.
* @throws java.io.IOException
*/
public void writePoint(double weight, DataPoint dp, double label) throws IOException
{
ByteArrayOutputStream baos = local_baos.get();
pointToBytes(weight, dp, label, baos);
        if(baos.size() >= LOCAL_BUFFER_SIZE)//We've got a big chunk of data, let's dump it
synchronized(out)
{
baos.writeTo(out);
baos.reset();
}
}
/**
* This method converts a datapoint into the sequence of bytes used by the underlying file format.
* @param weight weight of the given data point to write out
* @param dp the data point to be converted to set of bytes
* @param label the label of the point to convert to the set of bytes
* @param byteOut the location to write the bytes to.
*/
abstract protected void pointToBytes(double weight, DataPoint dp, double label, ByteArrayOutputStream byteOut);
/**
* To be called after all threads are done calling {@link #writePoint(jsat.classifiers.DataPoint, double) }.
*/
public synchronized void finish() throws IOException
{
synchronized(out)
{
for(ByteArrayOutputStream baos : all_buffers)
{
baos.writeTo(out);
baos.reset();
}
out.flush();
}
}
@Override
public void close() throws IOException
{
finish();
out.close();
}
public static enum DataSetType
{
SIMPLE, CLASSIFICATION, REGRESSION
}
}
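
/**
 * Minimal usage sketch (illustrative only, not part of the original source):
 * streams synthetic regression points to a CSV file through the DataWriter
 * returned by CSV.getWriter, without building a DataSet in memory first. The
 * file name, the feature dimension, and the synthetic data are assumptions
 * made for this example.
 */
class DataWriterUsageSketch
{
    public static void main(String[] args) throws IOException
    {
        CategoricalData[] noCatFeats = new CategoricalData[0];
        try (DataWriter dw = CSV.getWriter(new java.io.FileOutputStream("stream.csv"),
                noCatFeats, 3, null, DataWriter.DataSetType.REGRESSION))
        {
            for (int i = 0; i < 1000; i++)//writePoint is thread safe, so this loop could also be run from multiple threads
            {
                jsat.linear.Vec x = new jsat.linear.DenseVector(new double[]{i, i * 0.5, i % 7});
                double target = x.sum();
                dw.writePoint(new DataPoint(x, new int[0], noCatFeats), target);
            }
        }//close() calls finish(), flushing every thread's local buffer
    }
}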
| 5,502 | 32.554878 | 131 | java |
JSAT | JSAT-master/JSAT/src/jsat/io/JSATData.java | /*
* Copyright (C) 2015 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.io;
import java.io.*;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;
import jsat.*;
import jsat.classifiers.*;
import jsat.linear.*;
import jsat.regression.RegressionDataSet;
import jsat.utils.DoubleList;
import jsat.utils.IntList;
/**
* JSAT Data Loader provides a simple binary file format for storing and reading
* datasets. All datasets can always be read back in as a {@link SimpleDataSet},
* and {@link ClassificationDataSet} and {@link RegressionDataSet} datasets can
* be read back in as their original types.<br>
* <br>
* For well behaved datasets (where most numeric features are integer values),
* an uncompressed JSAT Dataset file may be larger than a similar
* {@link ARFFLoader ARFF} or {@link LIBSVMLoader LIBSVM} file. This is because
* JSAT always uses 32 or 64 bits (4 or 8 bytes) for every value, where values
* stored as a string could use as little as 1 or 2 bytes for simple values.
 * However, JSAT's storage will be consistent - values that need many decimal
 * digits (such as the number "0.25098039215686274") will require additional
 * bytes in the human readable ARFF and LIBSVM formats, where the binary JSAT
 * format will stay the same size.<br>
* <br>
* Even when JSAT produces larger files, since it is a simple binary format,
* reading and writing will usually be significantly faster.
* <br>
* Additional storage savings can be obtained by using the
* {@link GZIPOutputStream} when storing a dataset, and then decompressed when
* read back in using {@link GZIPInputStream}.
* <br>
*
*
*
* @author Edward Raff
*/
public class JSATData
{
private JSATData()
{
}
public static final byte[] MAGIC_NUMBER = new byte[]
{
'J', 'S', 'A', 'T', '_', '0', '0'
};
public static enum DatasetTypeMarker
{
STANDARD,
REGRESSION,
CLASSIFICATION;
}
public static enum FloatStorageMethod
{
AUTO
{
@Override
protected void writeFP(double value, DataOutputStream out) throws IOException
{
                //AUTO doesn't actually write! AUTO just means figure out the best method
                throw new UnsupportedOperationException("Not supported.");
}
@Override
protected double readFP(DataInputStream in) throws IOException
{
                //AUTO doesn't actually read! AUTO just means figure out the best method
                throw new UnsupportedOperationException("Not supported.");
}
@Override
protected boolean noLoss(double orig)
{
return true;
}
},
FP64
{
@Override
protected void writeFP(double value, DataOutputStream out) throws IOException
{
out.writeDouble(value);
}
@Override
protected double readFP(DataInputStream in) throws IOException
{
return in.readDouble();
}
@Override
protected boolean noLoss(double orig)
{
return true;
}
},
FP32
{
@Override
protected void writeFP(double value, DataOutputStream out) throws IOException
{
out.writeFloat((float) value);
}
@Override
protected double readFP(DataInputStream in) throws IOException
{
return in.readFloat();
}
@Override
protected boolean noLoss(double orig)
{
                //below can only return true if there is no loss in storing these values as 32 bit floats instead of doubles
float f_o = (float) orig;
return Double.valueOf(f_o)-orig == 0.0;
}
},
SHORT
{
@Override
protected void writeFP(double value, DataOutputStream out) throws IOException
{
out.writeShort(Math.min(Math.max((int)value, Short.MIN_VALUE), Short.MAX_VALUE));
}
@Override
protected double readFP(DataInputStream in) throws IOException
{
return in.readShort();
}
@Override
protected boolean noLoss(double orig)
{
return Short.MIN_VALUE <= orig && orig <= Short.MAX_VALUE && orig == Math.rint(orig);
}
},
BYTE
{
@Override
protected void writeFP(double value, DataOutputStream out) throws IOException
{
out.writeByte(Math.min(Math.max((int)value, Byte.MIN_VALUE), Byte.MAX_VALUE));
}
@Override
protected double readFP(DataInputStream in) throws IOException
{
return in.readByte();
}
@Override
protected boolean noLoss(double orig)
{
return Byte.MIN_VALUE <= orig && orig <= Byte.MAX_VALUE && orig == Math.rint(orig);
}
},
U_BYTE
{
@Override
protected void writeFP(double value, DataOutputStream out) throws IOException
{
out.writeByte(Math.min(Math.max((int)value, 0), 255));
}
@Override
protected double readFP(DataInputStream in) throws IOException
{
return in.readByte() & 0xff;
}
@Override
protected boolean noLoss(double orig)
{
return 0 <= orig && orig <= 255 && orig == Math.rint(orig);
}
};
abstract protected void writeFP(double value, DataOutputStream out) throws IOException;
abstract protected double readFP(DataInputStream in) throws IOException;
abstract protected boolean noLoss(double orig);
static public <Type extends DataSet<Type>> FloatStorageMethod getMethod(DataSet<Type> data, FloatStorageMethod method)
{
if (method == FloatStorageMethod.AUTO)//figure out what storage method to use!
{
EnumSet<FloatStorageMethod> storageCandidates = EnumSet.complementOf(EnumSet.of(FloatStorageMethod.AUTO));
//loop through all the data and remove invalid candidates
for(int i = 0; i < data.size(); i++)
{
DataPoint dp = data.getDataPoint(i);
for (IndexValue iv : dp.getNumericalValues())
{
Iterator<FloatStorageMethod> iter = storageCandidates.iterator();
while (iter.hasNext())
{
if (!iter.next().noLoss(iv.getValue()))
iter.remove();
}
if (storageCandidates.size() == 1)
break;
}
Iterator<FloatStorageMethod> iter = storageCandidates.iterator();
while (iter.hasNext())
{
if (!iter.next().noLoss(data.getWeight(i)))
iter.remove();
}
if (storageCandidates.size() == 1)
break;
}
if(data instanceof RegressionDataSet)
{
for(IndexValue iv : ((RegressionDataSet)data).getTargetValues())
{
Iterator<FloatStorageMethod> iter = storageCandidates.iterator();
while (iter.hasNext())
{
if (!iter.next().noLoss(iv.getValue()))
iter.remove();
}
if (storageCandidates.size() == 1)
break;
}
}
if(storageCandidates.contains(BYTE))
return BYTE;
else if(storageCandidates.contains(U_BYTE))
return U_BYTE;
else if(storageCandidates.contains(SHORT))
return SHORT;
else if(storageCandidates.contains(FP32))
return FP32;
return FP64;
}
else
return method;
}
}
public static final byte STRING_ENCODING_ASCII = 0;
public static final byte STRING_ENCODING_UTF_16 = 1;
/**
* This method writes out a JSAT dataset to a binary format that can be read
* in again later, and could be read in other languages.<br>
* <br>
* The format that is used will understand both
* {@link ClassificationDataSet} and {@link RegressionDataSet} datasets as
* special cases, and will store the target values in the binary file. When
* read back in, they can be returned as their original dataset type, or
* treated as normal fields as a {@link SimpleDataSet}.<br>
 * The storage format for floating point values will be chosen automatically so
 * that there is no loss of precision when reading the data back in.
*
* @param <Type>
* @param dataset the dataset to write out to a binary file
* @param outRaw the raw output stream, the caller should provide a buffered
* stream.
* @throws IOException
*/
public static <Type extends DataSet<Type>> void writeData(DataSet<Type> dataset, OutputStream outRaw) throws IOException
{
writeData(dataset, outRaw, FloatStorageMethod.AUTO);
}
/**
* This method writes out a JSAT dataset to a binary format that can be read
* in again later, and could be read in other languages.<br>
* <br>
* The format that is used will understand both
* {@link ClassificationDataSet} and {@link RegressionDataSet} datasets as
* special cases, and will store the target values in the binary file. When
* read back in, they can be returned as their original dataset type, or
* treated as normal fields as a {@link SimpleDataSet}.
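 * <br>
 * For example, 32 bit float storage could be forced as follows (a minimal
 * sketch - the {@code dataSet} variable and the file name are placeholders):
 * <pre>{@code
 * try (OutputStream out = new BufferedOutputStream(new FileOutputStream("data.jsat")))
 * {
 *     JSATData.writeData(dataSet, out, FloatStorageMethod.FP32);
 * }
 * }</pre>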
*
* @param <Type>
* @param dataset the dataset to write out to a binary file
* @param outRaw the raw output stream, the caller should provide a buffered
* stream.
* @param fpStore the storage method of storing floating point values, which
* may result in a loss of precision depending on the method chosen.
* @throws IOException
*/
public static <Type extends DataSet<Type>> void writeData(DataSet<Type> dataset, OutputStream outRaw, FloatStorageMethod fpStore) throws IOException
{
fpStore = FloatStorageMethod.getMethod(dataset, fpStore);
DataWriter.DataSetType type;
CategoricalData predicting;
if(dataset instanceof ClassificationDataSet)
{
type = DataWriter.DataSetType.CLASSIFICATION;
predicting = ((ClassificationDataSet)dataset).getPredicting();
}
else if(dataset instanceof RegressionDataSet)
{
type = DataWriter.DataSetType.REGRESSION;
predicting = null;
}
else
{
type = DataWriter.DataSetType.SIMPLE;
predicting = null;
}
DataWriter dw = getWriter(outRaw, dataset.getCategories(), dataset.getNumNumericalVars(), predicting, fpStore, type);
//write out all the datapoints
for(int i = 0; i < dataset.size(); i++)
{
double label = 0;
if (dataset instanceof ClassificationDataSet)
label = ((ClassificationDataSet) dataset).getDataPointCategory(i);
else if (dataset instanceof RegressionDataSet)
label = ((RegressionDataSet) dataset).getTargetValue(i);
dw.writePoint(dataset.getWeight(i), dataset.getDataPoint(i), label);
}
dw.finish();
outRaw.flush();
}
/**
* Returns a DataWriter object which can be used to stream a set of arbitrary datapoints into the given output stream. This works in a thread safe manner.
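 * A minimal usage sketch (the output stream {@code out} is a placeholder, and a
 * concrete storage method is given because {@link FloatStorageMethod#AUTO} is
 * only resolved by the {@code writeData} methods):
 * <pre>{@code
 * DataWriter dw = JSATData.getWriter(out, new CategoricalData[0], 3, null,
 *         FloatStorageMethod.FP64, DataWriter.DataSetType.SIMPLE);
 * dw.writePoint(1.0, new DataPoint(new DenseVector(new double[]{1, 2, 3})), 0);
 * dw.finish();
 * }</pre>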
*
* @param out the location to store all the data
* @param catInfo information about the categorical features to be written
* @param dim information on how many numeric features exist
* @param predicting information on the class label, may be {@code null} if not a classification dataset
* @param fpStore the format floating point values should be stored as
* @param type what type of data set (simple, classification, regression) to be written
* @return the DataWriter that the actual points can be streamed through
* @throws IOException
*/
public static DataWriter getWriter(OutputStream out, CategoricalData[] catInfo, int dim, final CategoricalData predicting, final FloatStorageMethod fpStore, DataWriter.DataSetType type) throws IOException
{
return new DataWriter(out, catInfo, dim, type)
{
@Override
protected void writeHeader(CategoricalData[] catInfo, int dim, DataWriter.DataSetType type, OutputStream out) throws IOException
{
DataOutputStream data_out = new DataOutputStream(out);
data_out.write(JSATData.MAGIC_NUMBER);
int numNumeric = dim;
int numCat = catInfo.length;
DatasetTypeMarker marker = DatasetTypeMarker.STANDARD;
                if(type == DataWriter.DataSetType.REGRESSION)
{
numNumeric++;
marker = DatasetTypeMarker.REGRESSION;
}
                if(type == DataWriter.DataSetType.CLASSIFICATION)
{
numCat++;
marker = DatasetTypeMarker.CLASSIFICATION;
}
data_out.writeByte(marker.ordinal());
data_out.writeByte(fpStore.ordinal());
data_out.writeInt(numNumeric);
data_out.writeInt(numCat);
                data_out.writeInt(-1);//-1 used to indicate a potentially variable number of data points
for(CategoricalData category : catInfo)
{
                    //first, what's the name of the i'th category
writeString(category.getCategoryName(), data_out);
data_out.writeInt(category.getNumOfCategories());//output the number of categories
for(int i = 0; i < category.getNumOfCategories(); i++)//the option names
writeString(category.getOptionName(i), data_out);
}
//extra for classification dataset
if(type == DataWriter.DataSetType.CLASSIFICATION)
{
CategoricalData category = predicting;
                    //first, what's the name of the predicting category
writeString(category.getCategoryName(), data_out);
data_out.writeInt(category.getNumOfCategories());//output the number of categories
for(int i = 0; i < category.getNumOfCategories(); i++)//the option names
writeString(category.getOptionName(i), data_out);
}
data_out.flush();
}
@Override
protected void pointToBytes(double weight, DataPoint dp, double label, ByteArrayOutputStream byteOut)
{
try
{
DataOutputStream data_out = new DataOutputStream(byteOut);
fpStore.writeFP(weight, data_out);
for(int val : dp.getCategoricalValues())
data_out.writeInt(val);
if(type == DataWriter.DataSetType.CLASSIFICATION)
data_out.writeInt((int) label);
Vec numericVals = dp.getNumericalValues();
data_out.writeBoolean(numericVals.isSparse());
if(numericVals.isSparse())
{
if(type == DataWriter.DataSetType.REGRESSION)
data_out.writeInt(numericVals.nnz()+1);//+1 for the target value, which may actually be zero...
else
data_out.writeInt(numericVals.nnz());
for(IndexValue iv : numericVals)
{
data_out.writeInt(iv.getIndex());
fpStore.writeFP(iv.getValue(), data_out);
}
}
else
{
for(int j = 0; j < numericVals.length(); j++)
fpStore.writeFP(numericVals.get(j), data_out);
}
//append the target value
if(type == DataWriter.DataSetType.REGRESSION)
{
/*
* if dense, we only need to just add the extra double. If
* sparse, we do the index and then the double.
*/
if (numericVals.isSparse())
data_out.writeInt(numericVals.length());
fpStore.writeFP(label, data_out);
}
data_out.flush();
}
catch (IOException ex)
{
Logger.getLogger(JSATData.class.getName()).log(Level.SEVERE, null, ex);
}
}
};
}
/**
* This loads a JSAT dataset from an input stream, and will not do any of
* its own buffering. The DataSet will be returned as either a
* {@link SimpleDataSet}, {@link ClassificationDataSet}, or
* {@link RegressionDataSet} depending on what type of dataset was
* originally written out.<br>
*
* @param inRaw the input stream, caller should buffer it
* @return a dataset
* @throws IOException
*/
public static DataSet<?> load(InputStream inRaw) throws IOException
{
return load(inRaw, DataStore.DEFAULT_STORE.emptyClone());
}
/**
* This loads a JSAT dataset from an input stream, and will not do any of
* its own buffering. The DataSet will be returned as either a
* {@link SimpleDataSet}, {@link ClassificationDataSet}, or
* {@link RegressionDataSet} depending on what type of dataset was
* originally written out.<br>
*
* @param inRaw the input stream, caller should buffer it
* @param backingStore the data store to put all datapoints in
* @return a dataset
* @throws IOException
*/
public static DataSet<?> load(InputStream inRaw, DataStore backingStore) throws IOException
{
return load(inRaw, false, backingStore);
}
/**
* Loads in a JSAT dataset as a {@link SimpleDataSet}. So long as the input
* stream is valid, this will not fail.
*
* @param inRaw the input stream, caller should buffer it
* @return a SimpleDataSet object
* @throws IOException
*/
public static SimpleDataSet loadSimple(InputStream inRaw) throws IOException
{
return loadSimple(inRaw, new RowMajorStore());
}
/**
* Loads in a JSAT dataset as a {@link SimpleDataSet}. So long as the input
* stream is valid, this will not fail.
*
* @param inRaw the input stream, caller should buffer it
* @param backingStore the data store to put all data points in
* @return a SimpleDataSet object
* @throws IOException
*/
public static SimpleDataSet loadSimple(InputStream inRaw, DataStore backingStore) throws IOException
{
return (SimpleDataSet) load(inRaw, true, backingStore);
}
/**
* Loads in a JSAT dataset as a {@link ClassificationDataSet}. An exception
* will be thrown if the original dataset in the file was not a
* {@link ClassificationDataSet}.
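 * <br><br>
 * A minimal usage sketch (the file name is a placeholder):
 * <pre>{@code
 * try (InputStream in = new BufferedInputStream(new FileInputStream("train.jsat")))
 * {
 *     ClassificationDataSet cds = JSATData.loadClassification(in);
 * }
 * }</pre>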
*
* @param inRaw the input stream, caller should buffer it
* @return a ClassificationDataSet object
* @throws IOException
* @throws ClassCastException if the original dataset was a not a ClassificationDataSet
*/
public static ClassificationDataSet loadClassification(InputStream inRaw) throws IOException
{
return loadClassification(inRaw, new RowMajorStore());
}
/**
* Loads in a JSAT dataset as a {@link ClassificationDataSet}. An exception
* will be thrown if the original dataset in the file was not a
* {@link ClassificationDataSet}.
*
* @param inRaw the input stream, caller should buffer it
* @param backingStore the data store to put all data points in
* @return a ClassificationDataSet object
* @throws IOException
* @throws ClassCastException if the original dataset was a not a ClassificationDataSet
*/
public static ClassificationDataSet loadClassification(InputStream inRaw, DataStore backingStore) throws IOException
{
return (ClassificationDataSet) load(inRaw, backingStore);
}
/**
* Loads in a JSAT dataset as a {@link RegressionDataSet}. An exception
* will be thrown if the original dataset in the file was not a
* {@link RegressionDataSet}.
*
* @param inRaw the input stream, caller should buffer it
* @return a RegressionDataSet object
* @throws IOException
* @throws ClassCastException if the original dataset was a not a RegressionDataSet
*/
public static RegressionDataSet loadRegression(InputStream inRaw) throws IOException
{
return loadRegression(inRaw, new RowMajorStore());
}
/**
* Loads in a JSAT dataset as a {@link RegressionDataSet}. An exception
* will be thrown if the original dataset in the file was not a
* {@link RegressionDataSet}.
*
* @param inRaw the input stream, caller should buffer it
* @param backingStore the data store to put all data points in
* @return a RegressionDataSet object
* @throws IOException
* @throws ClassCastException if the original dataset was a not a RegressionDataSet
*/
public static RegressionDataSet loadRegression(InputStream inRaw, DataStore backingStore) throws IOException
{
return (RegressionDataSet) load(inRaw, backingStore);
}
/**
* This loads a JSAT dataset from an input stream, and will not do any of
* its own buffering. The DataSet will be returned as either a
* {@link SimpleDataSet}, {@link ClassificationDataSet}, or
* {@link RegressionDataSet} depending on what type of dataset was
* originally written out.<br>
* <br>
* This method supports forcing the load to return a {@link SimpleDataSet}.
* <br>
* This method uses a {@link RowMajorStore} store for the data.
*
* @param inRaw the input stream, caller should buffer it
 * @param forceAsStandard {@code true} to force the dataset to be loaded as a
 * {@link SimpleDataSet}, otherwise the type will be determined based on the
 * input stream's contents.
* @return a dataset
* @throws IOException
*/
@SuppressWarnings("unchecked")
protected static DataSet<?> load(InputStream inRaw, boolean forceAsStandard) throws IOException
{
return load(inRaw, forceAsStandard, DataStore.DEFAULT_STORE.emptyClone());
}
/**
* This loads a JSAT dataset from an input stream, and will not do any of
* its own buffering. The DataSet will be returned as either a
* {@link SimpleDataSet}, {@link ClassificationDataSet}, or
* {@link RegressionDataSet} depending on what type of dataset was
* originally written out.<br>
* <br>
* This method supports forcing the load to return a {@link SimpleDataSet}.
*
* @param inRaw the input stream, caller should buffer it
 * @param forceAsStandard {@code true} to force the dataset to be loaded as a
 * {@link SimpleDataSet}, otherwise the type will be determined based on the
 * input stream's contents.
* @param store the backing mechanism to store all the data in and use for the returned dataset object
* @return a dataset
* @throws IOException
*/
@SuppressWarnings("unchecked")
protected static DataSet<?> load(InputStream inRaw, boolean forceAsStandard, DataStore store) throws IOException
{
DataInputStream in = new DataInputStream(inRaw);
byte[] magic_number = new byte[MAGIC_NUMBER.length];
in.readFully(magic_number);
String magic = new String(magic_number, "US-ASCII");
if(!magic.startsWith("JSAT_"))
throw new RuntimeException("data does not contain magic number");
DatasetTypeMarker marker = DatasetTypeMarker.values()[in.readByte()];
FloatStorageMethod fpStore = FloatStorageMethod.values()[in.readByte()];
int numNumeric = in.readInt();
int numCat = in.readInt();
int N = in.readInt();
if(forceAsStandard)
marker = DatasetTypeMarker.STANDARD;
if(marker == DatasetTypeMarker.CLASSIFICATION)
numCat--;
else if(marker == DatasetTypeMarker.REGRESSION)
numNumeric--;
CategoricalData[] categories = new CategoricalData[numCat];
CategoricalData predicting = null;//may not be used
for(int i = 0; i < categories.length; i++)
{
            //first, what's the name of the i'th category
            String name = readString(in);
            int k = in.readInt();//read the number of categories
categories[i] = new CategoricalData(k);
categories[i].setCategoryName(name);
for(int j = 0; j < k; j++)//the option names
categories[i].setOptionName(readString(in), j);
}
if(marker == DatasetTypeMarker.CLASSIFICATION)
{
            //first, what's the name of the predicting category
            String name = readString(in);
            int k = in.readInt();//read the number of categories
predicting = new CategoricalData(k);
predicting.setCategoryName(name);
for(int j = 0; j < k; j++)//the option names
predicting.setOptionName(readString(in), j);
}
//used for both numeric and categorical target storage
DoubleList targets = new DoubleList();
DoubleList weights = new DoubleList();
store.setCategoricalDataInfo(categories);
store.setNumNumeric(numNumeric);
//read in all the data points
if(N < 0)
N = Integer.MAX_VALUE;
try
{
for(int i = 0; i < N; i++)
{
double weight = fpStore.readFP(in);//in.readDouble();
int[] catVals = new int[numCat];
double target = 0;
for(int j = 0; j < catVals.length; j++)
catVals[j] = in.readInt();
if(marker == DatasetTypeMarker.CLASSIFICATION)
{
                    //int can be stored losslessly in a double, so this is safe
target = in.readInt();
}
boolean sparse = in.readBoolean();
Vec numericVals;
if(sparse)
{
int nnz = in.readInt();
if(marker == DatasetTypeMarker.REGRESSION)
nnz--;//don't count the target value
int[] indicies = new int[nnz];
double[] values = new double[nnz];
for(int j = 0; j < nnz; j++)
{
indicies[j] = in.readInt();
values[j] = fpStore.readFP(in);
}
numericVals = new SparseVector(indicies, values, numNumeric, nnz);
}
else
{
numericVals = new DenseVector(numNumeric);
for(int j = 0; j < numNumeric; j++)
numericVals.set(j, fpStore.readFP(in));
}
//get the target value
if(marker == DatasetTypeMarker.REGRESSION)
{
/*
* if dense, we only need to just add the extra double. If
* sparse, we do the index and then the double.
*/
if (numericVals.isSparse())
                        in.readInt();//don't care, it's the last index value - so it's the target
target = fpStore.readFP(in);
}
DataPoint dp = new DataPoint(numericVals, catVals, categories);
weights.add(weight);
store.addDataPoint(dp);
switch(marker)
{
case CLASSIFICATION:
case REGRESSION:
targets.add(target);
default:
break;
}
}
}
catch (EOFException eo)
{
//No problem
}
in.close();
DataSet toRet;
switch(marker)
{
case CLASSIFICATION:
IntList targets_i = IntList.view(targets.stream().mapToInt(Double::intValue).toArray());
toRet = new ClassificationDataSet(store, targets_i, predicting);
break;
case REGRESSION:
toRet = new RegressionDataSet(store, targets);
break;
default:
toRet = new SimpleDataSet(store);
}
for(int i = 0; i < weights.size(); i++)
toRet.setWeight(i, weights.getD(i));
store.finishAdding();
return toRet;
}
private static void writeString(String s, DataOutputStream out) throws IOException
{
boolean isAscii = true;
for(int i = 0; i < s.length() && isAscii; i++)
if(s.charAt(i) >= 256 || s.charAt(i) <= 0)
isAscii = false;
if(isAscii)
{
out.writeByte(STRING_ENCODING_ASCII);
out.writeInt(s.length());//number of bytes of the string
for(int i = 0; i < s.length(); i++)
out.writeByte(s.charAt(i));
}
        else//write as UTF-16
{
byte[] bytes = s.getBytes("UTF-16");
out.writeByte(STRING_ENCODING_UTF_16);
out.writeInt(bytes.length);//number of bytes of the string
out.write(bytes);
}
}
private static String readString(DataInputStream in) throws IOException
{
StringBuilder builder = new StringBuilder();
byte encoding = in.readByte();
int bytesToRead = in.readInt();
switch(encoding)
{
case STRING_ENCODING_ASCII:
for (int i = 0; i < bytesToRead; i++)
                    builder.append((char) (in.readByte() & 0xFF));//mask so bytes in the 128-255 range read back as the chars that were written
return builder.toString();
case STRING_ENCODING_UTF_16:
byte[] bytes = new byte[bytesToRead];
in.readFully(bytes);
return new String(bytes, "UTF-16");
default:
throw new RuntimeException("Unkown string encoding value " + encoding);
}
}
}
| 33,034 | 37.819036 | 208 | java |
JSAT | JSAT-master/JSAT/src/jsat/io/LIBSVMLoader.java | package jsat.io;
import java.io.*;
import java.util.*;
import jsat.DataSet;
import jsat.DataStore;
import jsat.RowMajorStore;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.datatransform.DenseSparceTransform;
import jsat.linear.*;
import jsat.regression.RegressionDataSet;
import jsat.utils.DoubleList;
import jsat.utils.IntList;
import jsat.utils.StringUtils;
/**
 * Loads a LIBSVM data file into a {@link DataSet}. LIBSVM files do not indicate
* whether or not the target variable is supposed to be numerical or
* categorical, so two different loading methods are provided. For a LIBSVM file
* to be loaded correctly, it must match the LIBSVM spec without extensions.
* <br><br>
* Each line should begin with a numeric value. This is either a regression
* target or a class label. <br>
 * Then, for each non zero value in the data point, a single space should
 * precede a 1-based integer index, followed by a colon ":", followed by the
 * numeric feature value. <br> The single space before each index:value pair
 * should be the only space - there should be no double spaces in the file.
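 * <br><br>
 * For example, a small classification file in this format might look like the
 * following (an illustrative snippet, not taken from any particular data set):
 * <pre>
 * +1 1:0.5 3:1.25 7:-2
 * -1 2:1 3:0.5
 * </pre>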
* <br><br>
 * LIBSVM files do not explicitly specify the length of data vectors. This can
 * be problematic when loading separate training and testing data sets: if the
 * two files do not share the same highest non-zero index, the loaded data sets
 * will have incompatible vector lengths. To resolve this issue, use the loading
 * methods that include the optional {@code vectorLength} parameter to specify
 * the length beforehand.
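 * For example, a matching train / test pair could be loaded as follows (a
 * minimal sketch - the file names and the length of 784 are placeholders):
 * <pre>{@code
 * ClassificationDataSet train = LIBSVMLoader.loadC(new File("train.libsvm"), 0.5, 784);
 * ClassificationDataSet test = LIBSVMLoader.loadC(new File("test.libsvm"), 0.5, 784);
 * }</pre>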
*
* @author Edward Raff
*/
public class LIBSVMLoader
{
private static boolean fastLoad = true;
private LIBSVMLoader()
{
}
/*
* LIBSVM format is sparse
* <VAL> <1 based Index>:<Value>
*
*/
/**
* Loads a new regression data set from a LIBSVM file, assuming the label is
* a numeric target value to predict
*
* @param file the file to load
* @return a regression data set
* @throws FileNotFoundException if the file was not found
* @throws IOException if an error occurred reading the input stream
*/
public static RegressionDataSet loadR(File file) throws FileNotFoundException, IOException
{
return loadR(file, 0.5);
}
/**
* Loads a new regression data set from a LIBSVM file, assuming the label is
* a numeric target value to predict
*
* @param file the file to load
* @param sparseRatio the fraction of non zero values to qualify a data
* point as sparse
* @return a regression data set
* @throws FileNotFoundException if the file was not found
* @throws IOException if an error occurred reading the input stream
*/
public static RegressionDataSet loadR(File file, double sparseRatio) throws FileNotFoundException, IOException
{
return loadR(file, sparseRatio, -1);
}
/**
* Loads a new regression data set from a LIBSVM file, assuming the label is
* a numeric target value to predict
*
* @param file the file to load
* @param sparseRatio the fraction of non zero values to qualify a data
* point as sparse
* @param vectorLength the pre-determined length of each vector. If given a
* negative value, the largest non-zero index observed in the data will be
* used as the length.
* @return a regression data set
* @throws FileNotFoundException if the file was not found
* @throws IOException if an error occurred reading the input stream
*/
public static RegressionDataSet loadR(File file, double sparseRatio, int vectorLength) throws FileNotFoundException, IOException
{
return loadR(new FileReader(file), sparseRatio, vectorLength);
}
/**
* Loads a new regression data set from a LIBSVM file, assuming the label is
* a numeric target value to predict
*
* @param isr the input stream for the file to load
* @param sparseRatio the fraction of non zero values to qualify a data
* point as sparse
* @return a regression data set
* @throws IOException if an error occurred reading the input stream
*/
public static RegressionDataSet loadR(InputStreamReader isr, double sparseRatio) throws IOException
{
return loadR(isr, sparseRatio, -1);
}
/**
* Loads a new regression data set from a LIBSVM file, assuming the label is
* a numeric target value to predict.
*
* @param reader the reader for the file to load
* @param sparseRatio the fraction of non zero values to qualify a data
* point as sparse
* @param vectorLength the pre-determined length of each vector. If given a
* negative value, the largest non-zero index observed in the data will be
* used as the length.
* @return a regression data set
* @throws IOException
*/
public static RegressionDataSet loadR(Reader reader, double sparseRatio, int vectorLength) throws IOException
{
return loadR(reader, sparseRatio, vectorLength, DataStore.DEFAULT_STORE);
}
/**
* Loads a new regression data set from a LIBSVM file, assuming the label is
* a numeric target value to predict.
*
* @param reader the reader for the file to load
* @param sparseRatio the fraction of non zero values to qualify a data
* point as sparse
* @param vectorLength the pre-determined length of each vector. If given a
* negative value, the largest non-zero index observed in the data will be
* used as the length.
* @param store the type of store to use for data
* @return a regression data set
* @throws IOException
*/
public static RegressionDataSet loadR(Reader reader, double sparseRatio, int vectorLength, DataStore store) throws IOException
{
return (RegressionDataSet) loadG(reader, sparseRatio, vectorLength, false, store);
}
/**
* Loads a new classification data set from a LIBSVM file, assuming the
* label is a nominal target value
*
* @param file the file to load
* @return a classification data set
* @throws FileNotFoundException if the file was not found
* @throws IOException if an error occurred reading the input stream
*/
public static ClassificationDataSet loadC(File file) throws FileNotFoundException, IOException
{
return loadC(new FileReader(file), 0.5);
}
/**
* Loads a new classification data set from a LIBSVM file, assuming the
* label is a nominal target value
*
* @param file the file to load
* @param sparseRatio the fraction of non zero values to qualify a data
* point as sparse
* @return a classification data set
* @throws FileNotFoundException if the file was not found
* @throws IOException if an error occurred reading the input stream
*/
public static ClassificationDataSet loadC(File file, double sparseRatio) throws FileNotFoundException, IOException
{
return loadC(file, sparseRatio, -1);
}
/**
* Loads a new classification data set from a LIBSVM file, assuming the
* label is a nominal target value
*
* @param file the file to load
* @param sparseRatio the fraction of non zero values to qualify a data
* point as sparse
* @param vectorLength the pre-determined length of each vector. If given a
* negative value, the largest non-zero index observed in the data will be
* used as the length.
* @return a classification data set
* @throws FileNotFoundException if the file was not found
* @throws IOException if an error occurred reading the input stream
*/
public static ClassificationDataSet loadC(File file, double sparseRatio, int vectorLength) throws FileNotFoundException, IOException
{
return loadC(new FileReader(file), sparseRatio, vectorLength);
}
/**
* Loads a new classification data set from a LIBSVM file, assuming the
* label is a nominal target value
*
* @param isr the input stream for the file to load
* @param sparseRatio the fraction of non zero values to qualify a data
* point as sparse
* @return a classification data set
* @throws IOException if an error occurred reading the input stream
*/
public static ClassificationDataSet loadC(InputStreamReader isr, double sparseRatio) throws IOException
{
return loadC(isr, sparseRatio, -1);
}
/**
* Loads a new classification data set from a LIBSVM file, assuming the
* label is a nominal target value
*
* @param reader the input stream for the file to load
* @param sparseRatio the fraction of non zero values to qualify a data
* point as sparse
* @param vectorLength the pre-determined length of each vector. If given a
* negative value, the largest non-zero index observed in the data will be
* used as the length.
* @return a classification data set
* @throws IOException if an error occurred reading the input stream
*/
public static ClassificationDataSet loadC(Reader reader, double sparseRatio, int vectorLength) throws IOException
{
return loadC(reader, sparseRatio, vectorLength, DataStore.DEFAULT_STORE);
}
/**
* Loads a new classification data set from a LIBSVM file, assuming the
* label is a nominal target value
*
* @param reader the input stream for the file to load
* @param sparseRatio the fraction of non zero values to qualify a data
* point as sparse
* @param vectorLength the pre-determined length of each vector. If given a
* negative value, the largest non-zero index observed in the data will be
* used as the length.
* @param store the type of store to use for the data
* @return a classification data set
* @throws IOException if an error occurred reading the input stream
*/
public static ClassificationDataSet loadC(Reader reader, double sparseRatio, int vectorLength, DataStore store) throws IOException
{
return (ClassificationDataSet) loadG(reader, sparseRatio, vectorLength, true, store);
}
/**
* Generic loader for both Classification and Regression interpretations.
* @param reader
* @param sparseRatio
* @param vectorLength
* @param classification {@code true} to treat as classification,
* {@code false} to treat as regression
* @return
* @throws IOException
*/
private static DataSet loadG(Reader reader, double sparseRatio, int vectorLength, boolean classification, DataStore store) throws IOException
{
StringBuilder processBuffer = new StringBuilder(20);
StringBuilder charBuffer = new StringBuilder(1024);
char[] buffer = new char[1024];
DataStore sparceVecs = store.emptyClone();
sparceVecs.setCategoricalDataInfo(new CategoricalData[0]);
/**
* The category "label" for each value loaded in
*/
List<Double> labelVals = new DoubleList();
Map<Double, Integer> possibleCats = new HashMap<>();
int maxLen= 1;
STATE state = STATE.INITIAL;
int position = 0;
SparseVector tempVec = new SparseVector(1, 1);
/**
* The index that we have parse out of a non zero pair
*/
int indexProcessing = -1;
while(true)
{
while(charBuffer.length()-position <= 1)//make sure we have chars to handle
{
//move everything to the front
charBuffer.delete(0, position);
position = 0;
int read = reader.read(buffer);
if(read < 0)
break;
charBuffer.append(buffer, 0, read);
}
if(charBuffer.length()-position == 0)//EOF, no more chars
{
                if(state == STATE.LABEL)//stream ended while reading a label, so the last line had no features
{
double label = Double.parseDouble(processBuffer.toString());
if (!possibleCats.containsKey(label) && classification)
possibleCats.put(label, possibleCats.size());
labelVals.add(label);
sparceVecs.addDataPoint(new DataPoint(new SparseVector(maxLen, 0)));
}
                else if(state == STATE.WHITESPACE_AFTER_LABEL)//last line had no features, but we have already eaten the label
{
sparceVecs.addDataPoint(new DataPoint(new SparseVector(maxLen, 0)));
}
else if(state == STATE.FEATURE_VALUE || state == STATE.WHITESPACE_AFTER_FEATURE)//line ended after a value pair
{
//process the last value pair & insert into vec
double value = StringUtils.parseDouble(processBuffer, 0, processBuffer.length());
processBuffer.delete(0, processBuffer.length());
maxLen = Math.max(maxLen, indexProcessing+1);
tempVec.setLength(maxLen);
if (value != 0)
tempVec.set(indexProcessing, value);
sparceVecs.addDataPoint(new DataPoint(tempVec.clone()));
}
else if(state == STATE.NEWLINE)
{
//nothing to do and everything already processed, just return
break;
}
else
throw new RuntimeException();
//we may have ended on a line, and have a sparse vec to add before returning
break;
}
char ch = charBuffer.charAt(position);
switch(state)
{
case INITIAL:
state = STATE.LABEL;
break;
case LABEL:
if (Character.isDigit(ch) || ch == '.' || ch == 'E' || ch == 'e' || ch == '-' || ch == '+')
{
processBuffer.append(ch);
position++;
}
else if (Character.isWhitespace(ch))//this gets spaces and new lines
{
double label = Double.parseDouble(processBuffer.toString());
if (!possibleCats.containsKey(label) && classification)
possibleCats.put(label, possibleCats.size());
labelVals.add(label);
//clean up and move to new state
processBuffer.delete(0, processBuffer.length());
if (ch == '\n' || ch == '\r')//empty line, so add a zero vector
{
tempVec.zeroOut();
sparceVecs.addDataPoint(new DataPoint(new SparseVector(maxLen, 0)));
state = STATE.NEWLINE;
}
else//just white space
{
tempVec.zeroOut();
state = STATE.WHITESPACE_AFTER_LABEL;
}
}
else
throw new RuntimeException("Invalid LIBSVM file");
break;
case WHITESPACE_AFTER_LABEL:
if (Character.isDigit(ch))//move to next state
{
state = STATE.FEATURE_INDEX;
}
else if (Character.isWhitespace(ch))
{
if (ch == '\n' || ch == '\r')
{
tempVec.zeroOut();
sparceVecs.addDataPoint(new DataPoint(new SparseVector(maxLen, 0)));///no features again, add zero vec
state = STATE.NEWLINE;
}
                        else//normal white space
position++;
}
else
throw new RuntimeException();
break;
case FEATURE_INDEX:
if (Character.isDigit(ch))
{
processBuffer.append(ch);
position++;
}
else if(ch == ':')
{
indexProcessing = StringUtils.parseInt(processBuffer, 0, processBuffer.length())-1;
processBuffer.delete(0, processBuffer.length());
state = STATE.FEATURE_VALUE;
position++;
}
else
throw new RuntimeException();
break;
case FEATURE_VALUE:
//we need to accept all the values that may be part of a float value
if (Character.isDigit(ch) || ch == '.' || ch == 'E' || ch == 'e' || ch == '-' || ch == '+')
{
processBuffer.append(ch);
position++;
}
else
{
double value = StringUtils.parseDouble(processBuffer, 0, processBuffer.length());
processBuffer.delete(0, processBuffer.length());
maxLen = Math.max(maxLen, indexProcessing+1);
tempVec.setLength(maxLen);
if (value != 0)
tempVec.set(indexProcessing, value);
if (Character.isWhitespace(ch))
state = STATE.WHITESPACE_AFTER_FEATURE;
else
throw new RuntimeException();
}
break;
case WHITESPACE_AFTER_FEATURE:
if (Character.isDigit(ch))
state = STATE.FEATURE_INDEX;
else if (Character.isWhitespace(ch))
{
if (ch == '\n' || ch == '\r')
{
sparceVecs.addDataPoint(new DataPoint(tempVec.clone()));
tempVec.zeroOut();
state = STATE.NEWLINE;
}
else
position++;
}
break;
case NEWLINE:
if (ch == '\n' || ch == '\r')
position++;
else
{
state = STATE.LABEL;
}
break;
}
}
if (vectorLength > 0)
if (maxLen > vectorLength)
throw new RuntimeException("Length given was " + vectorLength + ", but observed length was " + maxLen);
else
maxLen = vectorLength;
if(classification)
{
CategoricalData predicting = new CategoricalData(possibleCats.size());
//Give categories a unique ordering to avoid loading issues based on the order categories are presented
List<Double> allCatKeys = new DoubleList(possibleCats.keySet());
Collections.sort(allCatKeys);
for(int i = 0; i < allCatKeys.size(); i++)
possibleCats.put(allCatKeys.get(i), i);
//apply to target values now
IntList label_targets = IntList.view(labelVals.stream()
.mapToInt(possibleCats::get)
.toArray());
sparceVecs.setNumNumeric(maxLen);
sparceVecs.finishAdding();
ClassificationDataSet cds = new ClassificationDataSet(sparceVecs, label_targets);
if(store instanceof RowMajorStore)
cds.applyTransform(new DenseSparceTransform(sparseRatio));
return cds;
}
else//regression
{
sparceVecs.setNumNumeric(maxLen);
sparceVecs.finishAdding();
RegressionDataSet rds = new RegressionDataSet(sparceVecs, labelVals);
rds.applyTransform(new DenseSparceTransform(sparseRatio));
return rds;
}
}
/**
* Writes out the given classification data set as a LIBSVM data file
* @param data the data set to write to a file
 * @param os the output stream to write to. The stream will be flushed and
 * closed by this method
*/
public static void write(ClassificationDataSet data, OutputStream os)
{
PrintWriter writer = new PrintWriter(os);
for(int i = 0; i < data.size(); i++)
{
int pred = data.getDataPointCategory(i);
Vec vals = data.getDataPoint(i).getNumericalValues();
writer.write(pred + " ");
for(IndexValue iv : vals)
{
double val = iv.getValue();
                if(Math.rint(val) == val)//cast to long before writing to save space
writer.write((iv.getIndex()+1) + ":" + (long)val + " ");//+1 b/c 1 based indexing
else
writer.write((iv.getIndex()+1) + ":" + val + " ");//+1 b/c 1 based indexing
}
writer.write("\n");
}
writer.flush();
writer.close();
}
/**
* Writes out the given regression data set as a LIBSVM data file
* @param data the data set to write to a file
 * @param os the output stream to write to. The stream will be flushed and
 * closed by this method
*/
public static void write(RegressionDataSet data, OutputStream os)
{
PrintWriter writer = new PrintWriter(os);
for(int i = 0; i < data.size(); i++)
{
double pred = data.getTargetValue(i);
Vec vals = data.getDataPoint(i).getNumericalValues();
writer.write(pred + " ");
for(IndexValue iv : vals)
{
double val = iv.getValue();
                if(Math.rint(val) == val)//cast to long before writing to save space
writer.write((iv.getIndex()+1) + ":" + (long)val + " ");//+1 b/c 1 based indexing
else
writer.write((iv.getIndex()+1) + ":" + val + " ");//+1 b/c 1 based indexing
}
writer.write("\n");
}
writer.flush();
writer.close();
}
/**
* Returns a DataWriter object which can be used to stream a set of
* arbitrary datapoints into the given output stream. This works in a thread
* safe manner.<br>
 * Categorical information does not need to be specified since LIBSVM files can't store categorical features.
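 * <br>
 * A minimal usage sketch (the output stream {@code out} is a placeholder):
 * <pre>{@code
 * DataWriter dw = LIBSVMLoader.getWriter(out, 3, DataWriter.DataSetType.REGRESSION);
 * dw.writePoint(1.0, new DataPoint(new DenseVector(new double[]{0, 2, 0})), 0.5);
 * dw.finish();
 * }</pre>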
*
* @param out the location to store all the data
* @param dim information on how many numeric features exist
* @param type what type of data set (simple, classification, regression) to be written
* @return the DataWriter that the actual points can be streamed through
* @throws IOException
*/
public static DataWriter getWriter(OutputStream out, int dim, DataWriter.DataSetType type) throws IOException
{
DataWriter dw = new DataWriter(out, new CategoricalData[0], dim, type)
{
@Override
protected void writeHeader(CategoricalData[] catInfo, int dim, DataWriter.DataSetType type, OutputStream out)
{
//nothing to do, LIBSVM format has no header
}
@Override
protected void pointToBytes(double weight, DataPoint dp, double label, ByteArrayOutputStream byteOut)
{
PrintWriter writer = new PrintWriter(byteOut);
//write out label
if(this.type == DataSetType.REGRESSION)
writer.write(label + " ");
else if(this.type == DataSetType.CLASSIFICATION)
writer.write((int)label + " ");
else if(this.type == DataSetType.SIMPLE)
writer.write("0 ");
Vec vals = dp.getNumericalValues();
for(IndexValue iv : vals)
{
double val = iv.getValue();
                    if(Math.rint(val) == val)//cast to long before writing to save space
writer.write((iv.getIndex()+1) + ":" + (long)val + " ");//+1 b/c 1 based indexing
else
writer.write((iv.getIndex()+1) + ":" + val + " ");//+1 b/c 1 based indexing
}
writer.write("\n");
writer.flush();
}
};
return dw;
}
/**
* Simple state machine used to parse LIBSVM files
*/
private enum STATE
{
/**
* Initial state, doesn't actually do anything
*/
INITIAL,
LABEL,
WHITESPACE_AFTER_LABEL,
FEATURE_INDEX,
FEATURE_VALUE,
WHITESPACE_AFTER_FEATURE,
NEWLINE,
}
}
| 25,808 | 39.708202 | 145 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/CholeskyDecomposition.java |
package jsat.linear;
import java.io.Serializable;
import java.util.concurrent.ExecutorService;
import static java.lang.Math.*;
import java.util.concurrent.CountDownLatch;
import java.util.logging.Level;
import java.util.logging.Logger;
import static jsat.linear.LUPDecomposition.*;
import jsat.utils.SystemInfo;
/**
* The Cholesky Decomposition factors a symmetric positive definite matrix A
* into the form A = L L<sup>T</sup>. The Cholesky Decomposition of a matrix is
* unique.
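 * <br><br>
 * A minimal usage sketch (the 2 x 2 matrix below is an arbitrary example of a
 * symmetric positive definite matrix):
 * <pre>{@code
 * Matrix A = new DenseMatrix(new double[][]{{4, 2}, {2, 3}});
 * Vec b = new DenseVector(new double[]{1, 2});
 * CholeskyDecomposition chol = new CholeskyDecomposition(A.clone());
 * Vec x = chol.solve(b); // x satisfies A x = b
 * }</pre>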
*
* @author Edward Raff
*/
public class CholeskyDecomposition implements Serializable
{
    //TODO add block decomposition for efficiency
private static final long serialVersionUID = 8925094456733750112L;
/**
* Contains the matrix 'L', but instead of just keeping the lower triangular, we keep it
* in a symmetric copy so {@link LUPDecomposition#forwardSub(jsat.linear.Matrix, jsat.linear.Vec) }
* and backSub can be done without copying.
*/
private Matrix L;
/**
* Computes the Cholesky Decomposition of the matrix A. The matrix
* <tt>A</tt> will be altered to form the decomposition <tt>L</tt>. If A is
* still needed after this computation a {@link Matrix#clone() clone} of the
* matrix should be given instead. <br>
* NOTE: No check for the symmetric positive definite property will occur.
 * The results of passing a matrix that does not meet these properties are
 * undefined.
*
* @param A the matrix to create the Cholesky Decomposition of
*/
public CholeskyDecomposition(final Matrix A)
{
if(!A.isSquare())
throw new ArithmeticException("Input matrix must be symmetric positive definite");
L = A;
final int ROWS = A.rows();
for (int j = 0; j < ROWS; j++)
{
double L_jj = computeLJJ(A, j);
L.set(j, j, L_jj);
updateRows(j, j + 1, ROWS, 1, A, L_jj);
}
copyUpperToLower(ROWS);
}
/**
* Computes the Cholesky Decomposition of the matrix A. The matrix
* <tt>A</tt> will be altered to form the decomposition <tt>L</tt>. If A is
* still needed after this computation a {@link Matrix#clone() clone} of the
* matrix should be given instead. <br>
* NOTE: No check for the symmetric positive definite property will occur.
 * The results of passing a matrix that does not meet these properties are
 * undefined.
*
* @param A the matrix to create the Cholesky Decomposition of
* @param threadpool the source of threads for computation
*/
public CholeskyDecomposition(final Matrix A, ExecutorService threadpool)
{
if(!A.isSquare())
throw new ArithmeticException("Input matrix must be symmetric positive definite");
L = A;
final int ROWS = A.rows();
double nextLJJ = computeLJJ(A, 0);
for (int j = 0; j < ROWS; j++)
{
final int J = j;
final double L_jj = nextLJJ;//computeLJJ(A, j);
L.set(j, j, L_jj);
final CountDownLatch latch = new CountDownLatch(SystemInfo.LogicalCores-1);
for (int i = 1; i < SystemInfo.LogicalCores; i++)
{
final int ID = i;
threadpool.submit(new Runnable()
{
@Override
public void run()
{
updateRows(J, J + 1+ID, ROWS, SystemInfo.LogicalCores, A, L_jj);
latch.countDown();
}
});
}
try
{
updateRows(J, J+1, ROWS, SystemInfo.LogicalCores, A, L_jj);
if(j+1 < ROWS)
nextLJJ = computeLJJ(A, j+1);
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(CholeskyDecomposition.class.getName()).log(Level.SEVERE, null, ex);
}
}
copyUpperToLower(ROWS);
}
/**
* The Cholesky Decomposition computes the factorization A = L L<sup>T</sup>. This method returns L<sup>T</sup>
* @return The upper triangular matrix L<sup>T</sup>
*/
public Matrix getLT()
{
Matrix LT = new DenseMatrix(L.rows(), L.cols());
for(int i = 0; i < L.rows(); i++)
for(int j = i; j < L.rows(); j++)
LT.set(i, j, L.get(i, j));
return LT;
}
/**
* Solves the linear system of equations A x = b
* @param b the vectors of values
* @return the vector x such that A x = b
*/
public Vec solve(Vec b)
{
//Solve A x = L L^T x = b, for x
//First solve L y = b
Vec y = forwardSub(L, b);
        //Solve L^T x = y
Vec x = backSub(L, y);
return x;
}
/**
 * Solves the linear system of equations A X = B
 * @param B the matrix of values
 * @return the matrix X such that A X = B
*/
public Matrix solve(Matrix B)
{
//Solve A x = L L^T x = b, for x
//First solve L y = b
Matrix y = forwardSub(L, B);
        //Solve L^T x = y
Matrix x = backSub(L, y);
return x;
}
/**
 * Solves the linear system of equations A X = B
 * @param B the matrix of values
 * @param threadpool the source of threads for parallel evaluation
 * @return the matrix X such that A X = B
*/
public Matrix solve(Matrix B, ExecutorService threadpool)
{
//Solve A x = L L^T x = b, for x
//First solve L y = b
Matrix y = forwardSub(L, B, threadpool);
        //Solve L^T x = y
Matrix x = backSub(L, y, threadpool);
return x;
}
/**
* Computes the determinant of A
* @return the determinant of A
*/
public double getDet()
{
return Math.exp(getLogDet());
}
/**
* Computes the log of the determinant of A. It is more numerically stable
* than explicitly calling {@link Math#log(double) } on the value returned
* by {@link #getDet() }.
*
* @return the log of the determinant of A.
*/
public double getLogDet()
{
double log_det = 0;
for(int i = 0; i < L.rows(); i++)
log_det += 2*Math.log(L.get(i, i));
return log_det;
}
private double computeLJJ(final Matrix A, final int j)
{
/**
* _________________
* / j - 1
* / =====
* / \ 2
* L = / A - > L
* j j / j j / j k
* / =====
* \/ k = 1
*/
double L_jj = A.get(j, j);
for(int k = 0; k < j; k++)
L_jj -= pow(L.get(j, k), 2);
final double result = sqrt(L_jj);
if(Double.isNaN(result))
throw new ArithmeticException("input matrix is not positive definite");
return result;
}
private void updateRows(final int j, final int start, final int end, final int skip, final Matrix A, final double L_jj)
{
/*
*
* / j - 1 \
* | ===== |
* 1 | \ |
* L = ---- |A - > L L |
* i j L | i j / i k j k|
* j j | ===== |
* \ k = 1 /
*/
for(int i = start; i < end; i+=skip)
{
double L_ij = A.get(i, j);
for(int k = 0; k < j; k++)
L_ij -= L.get(i, k)*L.get(j, k);
L.set(i, j, L_ij/L_jj);
}
}
private void copyUpperToLower(final int ROWS)
{
//Now copy so that All of L is filled
for (int i = 0; i < ROWS; i++)
for (int j = 0; j < i; j++)
L.set(j, i, L.get(i, j));
}
}
| 8,172 | 30.801556 | 123 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/ConcatenatedVec.java | package jsat.linear;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
/**
* ConcatenatedVec provides a light wrapper around a list of vectors to provide
 * a view of one single vector whose length is the sum of the lengths of the
 * inputs.
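 * <br><br>
 * A small illustrative sketch:
 * <pre>{@code
 * Vec a = new DenseVector(new double[]{1, 2});
 * Vec b = new DenseVector(new double[]{3, 4, 5});
 * Vec joined = new ConcatenatedVec(a, b); // length 5, joined.get(2) == 3.0
 * }</pre>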
*
* @author Edward Raff
*/
public class ConcatenatedVec extends Vec
{
private static final long serialVersionUID = -1412322616974470550L;
private Vec[] vecs;
private int[] lengthSums;
private int totalLength;
/**
* Creates a new Vector that is the concatenation of the given vectors in
* the given order. The vector created is backed by the ones provided, and
* any mutation to one is visible in the others.
*
* @param vecs the list of vectors to concatenate
*/
public ConcatenatedVec(List<Vec> vecs)
{
this.vecs = new Vec[vecs.size()];
lengthSums = new int[vecs.size()];
totalLength = 0;
for(int i = 0; i < vecs.size(); i++)
{
lengthSums[i] = totalLength;
this.vecs[i] = vecs.get(i);
totalLength += vecs.get(i).length();
}
}
/**
* Creates a new Vector that is the concatenation of the given vectors in
* the given order. The vector created is backed by the ones provided, and
* any mutation to one is visible in the others.
*
* @param vecs the array of vectors to concatenate
*/
public ConcatenatedVec(Vec... vecs)
{
this(Arrays.asList(vecs));
}
@Override
public int length()
{
return totalLength;
}
@Override
public double get(int index)
{
int baseIndex = getBaseIndex(index);
return vecs[baseIndex].get(index-lengthSums[baseIndex]);
}
@Override
public void set(int index, double val)
{
int baseIndex = getBaseIndex(index);
vecs[baseIndex].set(index-lengthSums[baseIndex], val);
}
//The following are implemented only for performance reasons
@Override
public void increment(int index, double val)
{
int baseIndex = getBaseIndex(index);
vecs[baseIndex].increment(index-lengthSums[baseIndex], val);
}
@Override
public int nnz()
{
int nnz = 0;
for(Vec v : vecs)
nnz += v.nnz();
return nnz;
}
@Override
public void mutableAdd(double c, Vec b)
{
for(int i = 0; i < vecs.length; i++)
{
vecs[i].mutableAdd(c, new SubVector(lengthSums[i], vecs[i].length(), b));
}
}
@Override
public Iterator<IndexValue> getNonZeroIterator(final int start)
{
return new Iterator<IndexValue>()
{
int baseIndex = -1;
IndexValue valToSend = new IndexValue(0, 0);
Iterator<IndexValue> curIter = null;
IndexValue nextValue = null;
@Override
public boolean hasNext()
{
if(baseIndex == -1)//initialize everything
{
baseIndex = getBaseIndex(start);
int curIndexConsidering = start;
                    //Keep moving till we find a vector with a non-zero value at or after the starting index
while(baseIndex < vecs.length && !vecs[baseIndex].getNonZeroIterator(curIndexConsidering-lengthSums[baseIndex]).hasNext())
{
baseIndex++;
if(baseIndex < vecs.length)
curIndexConsidering = lengthSums[baseIndex];
}
if(baseIndex >= vecs.length)
return false;//All zeros beyond this point
curIter = vecs[baseIndex].getNonZeroIterator(curIndexConsidering-lengthSums[baseIndex]);
nextValue = curIter.next();
return true;
}
else
return nextValue != null;
}
@Override
public IndexValue next()
{
if(nextValue == null)
throw new NoSuchElementException();
valToSend.setIndex(nextValue.getIndex()+lengthSums[baseIndex]);
valToSend.setValue(nextValue.getValue());
if(curIter.hasNext())
nextValue = curIter.next();
else
{
baseIndex++;
                    while(baseIndex < vecs.length && !(curIter = vecs[baseIndex].getNonZeroIterator()).hasNext())//keep moving till we find a non-empty vec
baseIndex++;
if(baseIndex >= vecs.length)//we have run out
{
nextValue = null;
curIter = null;
}
else
{
nextValue = curIter.next();
}
}
return valToSend;
}
@Override
public void remove()
{
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
};
}
@Override
public boolean isSparse()
{
for(Vec v : vecs)
if(v.isSparse())
return true;
return false;
}
@Override
public ConcatenatedVec clone()
{
Vec[] newVecs = new Vec[vecs.length];
for(int i = 0; i < vecs.length; i++)
newVecs[i] = vecs[i].clone();
return new ConcatenatedVec(Arrays.asList(newVecs));
}
private int getBaseIndex(int index)
{
int basIndex = Arrays.binarySearch(lengthSums, index);
if(basIndex < 0)
basIndex = (-(basIndex)-2);//-1 extra b/c we want to be on the lesser side
return basIndex;
}
@Override
public void setLength(int length)
{
if(length < 0)
throw new ArithmeticException("Can not create an array of negative length");
int toAdd = length - length();
int pos = vecs.length-1;
if(toAdd > 0)
{
vecs[pos].setLength(vecs[pos].length()+toAdd);
}
else//decreasing
{
while(Math.abs(toAdd) >= vecs[pos].length())
{
if(vecs[pos].nnz() > 0)
throw new RuntimeException("Can't decrease the length of this vector from " + length() + " to " + length + " due to non-zero value");
toAdd += vecs[pos--].length();
}
//if we can't do this, it will err at us
vecs[pos].setLength(vecs[pos].length()+toAdd);
vecs = Arrays.copyOf(vecs, pos+1);
}
}
}
| 6,965 | 29.419214 | 157 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/ConstantVector.java | package jsat.linear;
/**
* This class provides a simple utility to represent an immutable vector where
* all values in the vector must have the same constant value. The standard
* mutable methods can not alter this vector. However, the length and constant
* value can be altered by calling the {@link #setLength(int) } and
 * {@link #setConstant(double) } methods respectively. The Constant Vector
* representation uses only constant space.
* <br><br>
* This class can be useful in providing a generalized way to handle multiple
* unique values or a constant value. For example, a separate regularization
* constant could be used for every feature in a learning problem. Instead of
* writing code to handle multiple values separately from a single value, a
* ConstantVector can be used so that the constant value case can be an
* efficient call to the multiple value version of the code.
*
* @author Edward Raff
*/
public class ConstantVector extends Vec
{
private static final long serialVersionUID = 4840204242189111630L;
private double constant;
private int length;
/**
* Creates a new vector where all values have a single implicit value
* @param constant the constant to use as the single value for all indices
* @param length the length of this vector
*/
public ConstantVector(double constant, int length)
{
setConstant(constant);
setLength(length);
}
/**
* Sets the constant value that will be used as the value stored in every
* index of this vector.
* @param constant the constant value to represent as a vector
*/
public void setConstant(double constant)
{
this.constant = constant;
}
/**
* Sets the length of this vector. The length must be a non zero value
* @param length the new length for this vector
*/
public void setLength(int length)
{
if(length < 1)
throw new ArithmeticException("Vector length must be a positive constant");
this.length = length;
}
@Override
public int length()
{
return length;
}
@Override
public double get(int index)
{
return constant;
}
@Override
public double sum()
{
return constant*length;
}
@Override
public double mean()
{
return constant;
}
@Override
public double variance()
{
return 0;
}
@Override
public double standardDeviation()
{
return 0;
}
@Override
public Vec normalized()
{
return new ConstantVector(constant/Math.sqrt(constant*constant*length), length);
}
@Override
public double pNorm(double p)
{
return Math.pow(length*Math.pow(Math.abs(constant), p), 1/p);
}
@Override
public double median()
{
return constant;
}
@Override
public void set(int index, double val)
{
throw new ArithmeticException("ConstantVector does not support mutation");
}
@Override
public boolean isSparse()
{
return false;
}
@Override
public ConstantVector clone()
{
return new ConstantVector(constant, length);
}
}
| 3,253 | 23.466165 | 88 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/DenseMatrix.java |
package jsat.linear;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.utils.FakeExecutor;
import static java.lang.Math.*;
import static jsat.linear.GenericMatrix.NB2;
import static jsat.utils.SystemInfo.*;
/**
*
* @author Edward Raff
*/
public class DenseMatrix extends GenericMatrix
{
private static final long serialVersionUID = -3112110093920307822L;
private double[][] matrix;
/**
 * Creates a new matrix that is the outer product of the two given vectors,
 * so that entry (i, j) is equal to a(i)*b(j).
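 * <br><br>
 * A small illustrative sketch:
 * <pre>{@code
 * Vec a = new DenseVector(new double[]{1, 2});
 * Vec b = new DenseVector(new double[]{3, 4});
 * Matrix outer = new DenseMatrix(a, b); // rows are [3, 4] and [6, 8]
 * }</pre>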
* @param a the first Vector, this new Matrix will have as many rows as the length of this vector
 * @param b the second Vector, this new Matrix will have as many columns as the length of this vector
*/
public DenseMatrix(Vec a, Vec b)
{
matrix = new double[a.length()][b.length()];
for(int i = 0; i < a.length(); i++)
{
Vec rowVals = b.multiply(a.get(i));
for(int j = 0; j < b.length(); j++)
matrix[i][j] = rowVals.get(j);
}
}
/**
* Creates a new matrix of zeros
* @param rows the number of rows
* @param cols the number of columns
*/
public DenseMatrix(int rows, int cols)
{
matrix = new double[rows][cols];
}
/**
* Creates a new matrix that is a clone of the given matrix.
 * An error will be thrown if the rows of the given matrix
* are not all the same size
*
* @param matrix the matrix to clone the values of
*/
public DenseMatrix(double[][] matrix)
{
this.matrix = new double[matrix.length][matrix[0].length];
for(int i = 0; i < this.matrix.length; i++)
if(matrix[i].length != this.matrix[i].length)//The matrix we were given better have rows of the same length!
throw new RuntimeException("Given matrix was not of consistent size (rows have diffrent lengths)");
else
System.arraycopy(matrix[i], 0, this.matrix[i], 0, this.matrix[i].length);
}
/**
* Creates a new dense matrix that has a copy of all the same values as the
* given one
* @param toCopy the matrix to copy
*/
public DenseMatrix(Matrix toCopy)
{
this(toCopy.rows(), toCopy.cols());
toCopy.copyTo(this);
}
@Override
protected Matrix getMatrixOfSameType(int rows, int cols)
{
return new DenseMatrix(rows, cols);
}
@Override
public void mutableAdd(double c, Matrix b)
{
if(!sameDimensions(this, b))
throw new ArithmeticException("Matrix dimensions do not agree");
for(int i = 0; i < rows(); i++)
for(int j = 0; j < cols(); j++)
this.matrix[i][j] += c*b.get(i, j);
}
@Override
public void multiply(Vec b, double z, Vec c)
{
if(this.cols() != b.length())
throw new ArithmeticException("Matrix dimensions do not agree, [" + rows() +"," + cols() + "] x [" + b.length() + ",1]" );
if(this.rows() != c.length())
throw new ArithmeticException("Target vector dimension does not agree with matrix dimensions. Matrix has " + rows() + " rows but tagert has " + c.length());
for(int i = 0; i < rows(); i++)
{
            //The DenseVector constructor does not clone the array, it just takes the reference - making it fast
            DenseVector row = new DenseVector(matrix[i]);
            c.increment(i, row.dot(b)*z);//We use the dot product in this way so that if the incoming vector is sparse, we can take advantage of saved computations
}
}
@Override
public void transposeMultiply(double c, Vec b, Vec x)
{
if(this.rows() != b.length())
throw new ArithmeticException("Matrix dimensions do not agree, [" + cols() +"," + rows() + "] x [" + b.length() + ",1]" );
else if(this.cols() != x.length())
throw new ArithmeticException("Matrix dimensions do not agree with target vector");
        for(int i = 0; i < rows(); i++)//if b was sparse, we want to skip every time b_i = 0
        {
            double b_i = b.get(i);
            if(b_i == 0)//Skip, not quite as good as sparse handling
                continue;//TODO handle sparse input vector better
double[] A_i = this.matrix[i];
for(int j = 0; j < cols(); j++)
x.increment(j, c*b_i*A_i[j]);
}
}
@SuppressWarnings("unused")
private Matrix blockMultiply(Matrix b)
{
if(!canMultiply(this, b))
throw new ArithmeticException("Matrix dimensions do not agree");
DenseMatrix result = new DenseMatrix(this.rows(), b.cols());
///Should choose step size such that 2*NB2^2 * dataTypeSize <= CacheSize
int iLimit = result.rows();
int jLimit = result.cols();
int kLimit = this.cols();
for(int i0 = 0; i0 < iLimit; i0+=NB2)
for(int k0 = 0; k0 < kLimit; k0+=NB2)
for(int j0 = 0; j0 < jLimit; j0+=NB2)
{
for(int i = i0; i < min(i0+NB2, iLimit); i++)
{
double[] c_row_i = result.matrix[i];
for(int k = k0; k < min(k0+NB2, kLimit); k++)
{
double a = this.matrix[i][k];
for(int j = j0; j < min(j0+NB2, jLimit); j++)
c_row_i[j] += a * b.get(k, j);
}
}
}
return result;
}
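    /*
     * Worked example of the block-size rule above (an illustrative sketch, assuming
     * an L2 cache of 256KiB and 8-byte doubles): 2 * NB2^2 * 8 <= 262144 gives
     * NB2 <= 128, so one block from each operand fits in cache at the same time.
     */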
/**
* Copies the values from A_k to vk
     * @param k the index such that copying starts at k+1
     * @param M how far to copy values
     * @param vk the array to copy into
     * @param A_k the source row of the matrix
     * @return the summation of the squared values for all values copied into vk
*/
private double initalVKNormCompute(int k, int M, double[] vk, double[] A_k)
{
double vkNorm = 0.0;
for(int i = k+1; i < M; i++)
{
vk[i] = A_k[i];
vkNorm += vk[i]*vk[i];
}
return vkNorm;
}
private void qrUpdateQ(DenseMatrix Q, int k, double[] vk, double TwoOverBeta)
{
//Computing Q
//We are computing Q' in what we are treating as the column major order, which represents Q in row major order, which is what we want!
for (int j = 0; j < Q.cols(); j++)
{
double[] Q_j = Q.matrix[j];
            double y = 0;//y = vk dot Q_j
for (int i = k; i < Q.cols(); i++)
y += vk[i] * Q_j[i];
y *= TwoOverBeta;
for (int i = k; i < Q.rows(); i++)
{
Q_j[i] -= y * vk[i];
}
}
}
private void qrUpdateR(int k, int N, DenseMatrix A, double[] vk, double TwoOverBeta, int M)
{
//First run of loop removed, as it will be setting zeros. More accurate to just set them ourselves
if(k < N)
{
qrUpdateRFirstIteration(A, k, vk, TwoOverBeta, M);
}
        //The rest of the normal loop
for(int j = k+1; j < N; j++)
{
double[] A_j = A.matrix[j];
double y = 0;//y = vk dot A_j
for(int i = k; i < A.cols(); i++)
y += vk[i]*A_j[i];
y *= TwoOverBeta;
for(int i = k; i < M; i++)
A_j[i] -= y*vk[i];
}
}
private void qrUpdateRFirstIteration(DenseMatrix A, int k, double[] vk, double TwoOverBeta, int M)
{
double[] A_j = A.matrix[k];
double y = 0;//y = vk dot A_j
for(int i = k; i < A.cols(); i++)
y += vk[i]*A_j[i];
y *= TwoOverBeta;
A_j[k] -= y*vk[k];
for(int i = k+1; i < M; i++)
A_j[i] = 0.0;
}
@Override
public void changeSize(int newRows, int newCols)
{
if(newRows <= 0)
throw new ArithmeticException("Matrix must have a positive number of rows");
if(newCols <= 0)
throw new ArithmeticException("Matrix must have a positive number of columns");
final int oldRow = matrix.length;
//first, did the cols change? That forces a lot of allocation.
if(newCols != cols())
{
for(int i = 0; i < matrix.length; i++)
matrix[i] = Arrays.copyOf(matrix[i], newCols);
}
//now cols are equal, need to add or remove rows
matrix = Arrays.copyOf(matrix, newRows);
for(int i = oldRow; i < newRows; i++)
matrix[i] = new double[cols()];
}
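    /*
     * Example usage (a minimal sketch): growing keeps the existing values and pads
     * the new cells with zeros, while shrinking truncates them.
     *
     *   DenseMatrix M = new DenseMatrix(2, 2);
     *   M.changeSize(3, 4); // M is now 3x4, with the original 2x2 block unchanged
     */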
private class BlockMultRun implements Runnable
{
final CountDownLatch latch;
final DenseMatrix result;
final DenseMatrix b;
final int kLimit, jLimit, iLimit, threadID;
public BlockMultRun(CountDownLatch latch, DenseMatrix result, DenseMatrix b, int threadID)
{
this.latch = latch;
this.result = result;
this.b = b;
this.kLimit = cols();
this.jLimit = result.cols();
this.iLimit = result.rows();
this.threadID = threadID;
}
@Override
public void run()
{
for (int i0 = NB2 * threadID; i0 < iLimit; i0 += NB2 * LogicalCores)
for (int k0 = 0; k0 < kLimit; k0 += NB2)
for (int j0 = 0; j0 < jLimit; j0 += NB2)
for (int i = i0; i < min(i0 + NB2, iLimit); i++)
{
final double[] Ci = result.matrix[i];
for (int k = k0; k < min(k0 + NB2, kLimit); k++)
{
double a = matrix[i][k];
double[] Bk = b.matrix[k];
for (int j = j0; j < min(j0 + NB2, jLimit); j++)
Ci[j] += a * Bk[j];
}
}
latch.countDown();
}
}
private void blockMultiply(DenseMatrix b, ExecutorService threadPool, DenseMatrix C)
{
if(!canMultiply(this, b))
throw new ArithmeticException("Matrix dimensions do not agree");
else if(this.rows() != C.rows() || b.cols() != C.cols())
throw new ArithmeticException("Destination matrix does not match the multiplication dimensions");
CountDownLatch latch = new CountDownLatch(LogicalCores);
for(int threadID = 0; threadID < LogicalCores; threadID++)
threadPool.submit(new BlockMultRun(latch, C, b, threadID));
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public void transposeMultiply(final Matrix b, Matrix C)
{
transposeMultiply(b, C, new FakeExecutor());
}
@Override
public void transposeMultiply(final Matrix b, final Matrix C, ExecutorService threadPool)
{
        if(this.rows() != b.rows())//Normally it is A_cols == B_rows, but we are doing A'*B, not A*B
throw new ArithmeticException("Matrix dimensions do not agree [" + this.cols() + ", " + this.rows()+ "] * [" + b.rows() + ", " + b.cols() + "]");
else if(this.cols() != C.rows() || b.cols() != C.cols())
throw new ArithmeticException("Destination matrix does not have matching dimensions");
final DenseMatrix A = this;
//We only want to take care of the case where everything is of this class. Else let the generic version handle quirks
if( !(b instanceof DenseMatrix && C instanceof DenseMatrix) )
{
super.transposeMultiply(b, C, threadPool);
return;
}
final int iLimit = C.rows();
final int jLimit = C.cols();
final int kLimit = this.rows();
final int blockStep = Math.min(NB2, Math.max(iLimit/LogicalCores, 1));//reduce block size so we can use all cores if needed.
final CountDownLatch cdl = new CountDownLatch(LogicalCores);
for(int threadNum = 0; threadNum < LogicalCores; threadNum++)
{
final int threadID = threadNum;
threadPool.submit(new Runnable() {
public void run()
{
DenseMatrix BB = (DenseMatrix) b;
DenseMatrix CC = (DenseMatrix) C;
for (int i0 = blockStep * threadID; i0 < iLimit; i0 += blockStep * LogicalCores)
for (int k0 = 0; k0 < kLimit; k0 += blockStep)
for (int j0 = 0; j0 < jLimit; j0 += blockStep)
{
for (int k = k0; k < min(k0 + blockStep, kLimit); k++)
{
double[] A_row_k = A.matrix[k];
double[] B_row_k = BB.matrix[k];
for (int i = i0; i < min(i0 + blockStep, iLimit); i++)
{
final double a = A_row_k[i];
final double[] c_row_i = CC.matrix[i];
for (int j = j0; j < min(j0 + blockStep, jLimit); j++)
c_row_i[j] += a * B_row_k[j];
}
}
}
cdl.countDown();
}
});
}
try
{
cdl.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public void multiply(Matrix b, Matrix C)
{
if(!canMultiply(this, b))
throw new ArithmeticException("Matrix dimensions do not agree");
else if(this.rows() != C.rows() || b.cols() != C.cols())
throw new ArithmeticException("Target Matrix is no the correct size");
//We only want to opt the case where everyone is dense, else - let the generic version handle quierks
if( !(C instanceof DenseMatrix && b instanceof DenseMatrix))
{
super.multiply(b, C);
return;
}
/*
         * Instead of the usual (i, j, k) loop order, we compute in "pure row oriented" order
*
* see
*
* Data structures in Java for matrix computations
*
* CONCURRENCY AND COMPUTATION: PRACTICE AND EXPERIENCE
* Concurrency Computat.: Pract. Exper. 2004; 16:799–815 (DOI: 10.1002/cpe.793)
*
*/
DenseMatrix result = (DenseMatrix) C;
DenseMatrix B = (DenseMatrix) b;
//Pull out the index operations to hand optimize for speed.
double[] Arowi;
double[] Browk;
double[] Crowi;
for (int i = 0; i < result.rows(); i++)
{
Arowi = this.matrix[i];
Crowi = result.matrix[i];
for (int k = 0; k < this.cols(); k++)
{
double a = Arowi[k];
Browk = B.matrix[k];
for (int j = 0; j < Crowi.length; j++)
Crowi[j] += a * Browk[j];
}
}
}
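    /*
     * Example usage (a minimal sketch with hypothetical values). The product is
     * accumulated into C, so C should normally be a freshly allocated (zero) matrix:
     *
     *   DenseMatrix A = new DenseMatrix(new double[][]{{1, 2}, {3, 4}});
     *   DenseMatrix B = new DenseMatrix(new double[][]{{5, 6}, {7, 8}});
     *   DenseMatrix C = new DenseMatrix(2, 2); // all zeros
     *   A.multiply(B, C);                      // C = A * B = {{19, 22}, {43, 50}}
     */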
/**
     * this is a direct conversion of the outermost loop of {@link #multiply(jsat.linear.Matrix) }
*/
private class MultRun implements Runnable
{
final CountDownLatch latch;
final DenseMatrix A;
final DenseMatrix B, result;
final int threadID;
public MultRun(CountDownLatch latch, DenseMatrix A, DenseMatrix result, DenseMatrix B, int threadID)
{
this.latch = latch;
this.A = A;
this.result = result;
this.B = B;
this.threadID = threadID;
}
public void run()
{
//Pull out the index operations to hand optimize for speed.
double[] Ai;
double[] Bi;
double[] Ci;
for (int i = 0 + threadID; i < result.rows(); i += LogicalCores)
{
Ai = A.matrix[i];
Ci = result.matrix[i];
for (int k = 0; k < A.cols(); k++)
{
double a = Ai[k];
Bi = B.matrix[k];
for (int j = 0; j < Ci.length; j++)
Ci[j] += a * Bi[j];
}
}
latch.countDown();
}
}
@Override
public void multiply(Matrix b, Matrix C, ExecutorService threadPool)
{
        //We only care when everything is of this class, else let the generic implementation handle quirks
if(!(b instanceof DenseMatrix && C instanceof DenseMatrix))
{
super.multiply(b, C, threadPool);
return;
}
        if(this.rows()/NB2 >= LogicalCores)//Perform block execution only when we have a large enough matrix to keep every core busy!
{
blockMultiply((DenseMatrix)b, threadPool, (DenseMatrix)C);
return;
}
if(!canMultiply(this, b))
throw new ArithmeticException("Matrix dimensions do not agree");
else if(this.rows() != C.rows() || b.cols() != C.cols())
throw new ArithmeticException("Destination matrix does not match the multiplication dimensions");
CountDownLatch cdl = new CountDownLatch(LogicalCores);
for (int threadID = 0; threadID < LogicalCores; threadID++)
threadPool.submit(new MultRun(cdl, this, (DenseMatrix)C, (DenseMatrix)b, threadID));
try
{
cdl.await();
}
catch (InterruptedException ex)
{
            //failure? Fall back to the serial version
this.multiply(b, C);
}
}
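    /*
     * Example usage of the threaded version (a minimal sketch; the pool size and the
     * matrices A and B are hypothetical, and java.util.concurrent.Executors is assumed
     * to be available):
     *
     *   ExecutorService pool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
     *   DenseMatrix C = new DenseMatrix(A.rows(), B.cols()); // zeroed destination
     *   A.multiply(B, C, pool);
     *   pool.shutdown();
     */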
@Override
public void mutableMultiply(double c)
{
for(int i = 0; i < rows(); i++)
for(int j = 0; j < cols(); j++)
matrix[i][j] *= c;
}
@Override
public void mutableTranspose()
{
for(int i = 0; i < rows()-1; i++)
for(int j = i+1; j < cols(); j++)
{
double tmp = matrix[j][i];
matrix[j][i] = matrix[i][j];
matrix[i][j] = tmp;
}
}
@Override
public DenseMatrix transpose()
{
DenseMatrix toReturn = new DenseMatrix(cols(), rows());
this.transpose(toReturn);
return toReturn;
}
@Override
public void transpose(Matrix C)
{
if(this.rows() != C.cols() || this.cols() != C.rows())
throw new ArithmeticException("Target matrix does not have the correct dimensions");
for (int i0 = 0; i0 < rows(); i0 += NB2)
for (int j0 = 0; j0 < cols(); j0 += NB2)
for (int i = i0; i < min(i0+NB2, rows()); i++)
for (int j = j0; j < min(j0+NB2, cols()); j++)
C.set(j, i, this.get(i, j));
}
@Override
public double get(int i, int j)
{
return matrix[i][j];
}
@Override
public void set(int i, int j, double value)
{
matrix[i][j] = value;
}
@Override
public int rows()
{
return matrix.length;
}
@Override
public int cols()
{
return matrix[0].length;
}
@Override
public boolean isSparce()
{
return false;
}
@Override
public void swapRows(int r1, int r2)
{
if(r1 >= rows() || r2 >= rows())
throw new ArithmeticException("Can not swap row, matrix is smaller then requested");
else if(r1 < 0 || r2 < 0)
throw new ArithmeticException("Can not swap row, there are no negative row indices");
double[] tmp = matrix[r1];
matrix[r1] = matrix[r2];
matrix[r2] = tmp;
}
@Override
public void zeroOut()
{
for(int i = 0; i < rows(); i++)
Arrays.fill(matrix[i], 0);
}
@Override
public Vec getRowView(int r)
{
return new DenseVector(matrix[r]);
}
@Override
public Matrix[] lup()
{
Matrix[] lup = new Matrix[3];
Matrix P = eye(rows());
DenseMatrix L;
DenseMatrix U = this;
        //Initialization is a little weird because we want to handle rectangular cases as well!
        if(rows() > cols())//In this case, we will be changing U before returning it (have to make it smaller, but we can still avoid allocating extra space)
L = new DenseMatrix(rows(), cols());
else
L = new DenseMatrix(rows(), rows());
for(int i = 0; i < U.rows(); i++)
{
            //If rectangular, we still need to loop through to update the rest of L - even though we won't make many other changes
if(i < U.cols())
{
                //Partial pivoting, find the largest value in this column and move it to the top!
                //Find the largest magnitude value in column k, row j
int largestRow = i;
double largestVal = Math.abs(U.matrix[i][i]);
for (int j = i + 1; j < U.rows(); j++)
{
double rowJLeadVal = Math.abs(U.matrix[j][i]);
if (rowJLeadVal > largestVal)
{
largestRow = j;
largestVal = rowJLeadVal;
}
}
//SWAP!
U.swapRows(largestRow, i);
P.swapRows(largestRow, i);
L.swapRows(largestRow, i);
L.matrix[i][i] = 1;
}
            //Setting up L
for(int k = 0; k < Math.min(i, U.cols()); k++)
{
double tmp = U.matrix[i][k]/U.matrix[k][k];
L.matrix[i][k] = Double.isNaN(tmp) ? 0.0 : tmp;
U.matrix[i][k] = 0;
for(int j = k+1; j < U.cols(); j++)
{
U.matrix[i][j] -= L.matrix[i][k]*U.matrix[k][j];
}
}
}
if(rows() > cols())//Clean up!
{
            //We need to change U to a square nxn matrix in this case, so we can safely drop the extra rows!
double[][] newU = new double[cols()][];
System.arraycopy(U.matrix, 0, newU, 0, newU.length);
U = new DenseMatrix(newU);//We have made U point at a new object, but the array is still pointing at the same rows!
}
lup[0] = L;
lup[1] = U;
lup[2] = P;
return lup;
}
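    /*
     * Example usage (a minimal sketch with a hypothetical matrix). As written, this
     * method reuses the receiver's storage for U, so work on a clone if the original
     * values are still needed. With partial pivoting the factors should satisfy P*A = L*U:
     *
     *   DenseMatrix A = new DenseMatrix(new double[][]{{4, 3}, {6, 3}});
     *   Matrix[] lup = A.clone().lup();
     *   Matrix L = lup[0], U = lup[1], P = lup[2];
     */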
private class LUProwRun implements Callable<Integer>
{
final DenseMatrix L;
final DenseMatrix U;
final int k, threadNumber;
double largestSeen = Double.MIN_VALUE;
int largestIndex ;
public LUProwRun(DenseMatrix L, DenseMatrix U, int k, int threadNumber)
{
this.L = L;
this.U = U;
this.k = k;
largestIndex = k+1;
this.threadNumber = threadNumber;
}
/**
* Returns the index of the row with the largest absolute value we ever saw in column k+1
*/
public Integer call() throws Exception
{
for(int i = k+1+threadNumber; i < U.rows(); i+=LogicalCores)
{
double tmp = U.matrix[i][k]/U.matrix[k][k];
L.matrix[i][k] = Double.isNaN(tmp) ? 0.0 : tmp;
//We perform the first iteration of the loop outside, as we want to cache its value for searching later
U.matrix[i][k+1] -= L.matrix[i][k]*U.matrix[k][k+1];
if(Math.abs(U.matrix[i][k+1]) > largestSeen)
{
largestSeen = Math.abs(U.matrix[i][k+1]);
largestIndex = i;
}
for(int j = k+2; j < U.cols(); j++)
{
U.matrix[i][j] -= L.matrix[i][k]*U.matrix[k][j];
}
}
return largestIndex;
}
}
@Override
public Matrix[] lup(ExecutorService threadPool)
{
Matrix[] lup = new Matrix[3];
Matrix P = eye(rows());
DenseMatrix L;
DenseMatrix U = this;
        //Initialization is a little weird because we want to handle rectangular cases as well!
        if(rows() > cols())//In this case, we will be changing U before returning it (have to make it smaller, but we can still avoid allocating extra space)
L = new DenseMatrix(rows(), cols());
else
L = new DenseMatrix(rows(), rows());
List<Future<Integer>> bigIndecies = new ArrayList<Future<Integer>>(LogicalCores);
for(int k = 0; k < Math.min(rows(), cols()); k++)
{
            //Partial pivoting, find the largest value in this column and move it to the top!
            //Find the largest magnitude value in column k, row j
int largestRow = k;
double largestVal = Math.abs(U.matrix[k][k]);
if(bigIndecies.isEmpty())
for(int j = k+1; j < U.rows(); j++)
{
double rowJLeadVal = Math.abs(U.matrix[j][k]);
if(rowJLeadVal > largestVal)
{
largestRow = j;
largestVal = rowJLeadVal;
}
}
else
{
for(Future<Integer> fut : bigIndecies)
{
try
{
int j = fut.get();
double rowJLeadVal = Math.abs(U.matrix[j][k]);
if(rowJLeadVal > largestVal)
{
largestRow = j;
largestVal = rowJLeadVal;
}
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
catch (ExecutionException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
bigIndecies.clear();
}
//SWAP!
U.swapRows(largestRow, k);
P.swapRows(largestRow, k);
L.swapRows(largestRow, k);
L.matrix[k][k] = 1;
            //Setting up L
for(int threadNumber = 0; threadNumber < LogicalCores; threadNumber++)
bigIndecies.add(threadPool.submit(new LUProwRun(L, U, k, threadNumber)));
}
        //Zero out the entries below the diagonal of U
for(int k = 0; k < Math.min(rows(), cols()); k++)
for(int j = 0; j < k; j++)
U.matrix[k][j] = 0;
if(rows() > cols())//Clean up!
{
            //We need to change U to a square nxn matrix in this case, so we can safely drop the extra rows!
double[][] newU = new double[cols()][];
System.arraycopy(U.matrix, 0, newU, 0, newU.length);
U = new DenseMatrix(newU);//We have made U point at a new object, but the array is still pointing at the same rows!
}
lup[0] = L;
lup[1] = U;
lup[2] = P;
return lup;
}
@Override
public Matrix[] qr()
{
int N = cols(), M = rows();
Matrix[] qr = new Matrix[2];
DenseMatrix Q = Matrix.eye(M);
DenseMatrix A;
if(isSquare())
{
mutableTranspose();
A = this;
}
else
A = (DenseMatrix) this.transpose();
int to = cols() > rows() ? M : N;
double[] vk = new double[M];
for(int k = 0; k < to; k++)
{
double[] A_k = A.matrix[k];
double vkNorm = initalVKNormCompute(k, M, vk, A_k);
double beta = vkNorm;
double vk_k = vk[k] = A_k[k];//force into register, help the JIT!
vkNorm += vk_k*vk_k;
vkNorm = sqrt(vkNorm);
double alpha = -signum(vk_k) * vkNorm;
vk_k -= alpha;
vk[k] = vk_k;
beta += vk_k*vk_k;
if(beta == 0)
continue;
double TwoOverBeta = 2.0/beta;
qrUpdateQ(Q, k, vk, TwoOverBeta);
qrUpdateR(k, N, A, vk, TwoOverBeta, M);
}
qr[0] = Q;
if(isSquare())
{
A.mutableTranspose();
qr[1] = A;
}
else
qr[1] = A.transpose();
return qr;
}
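    /*
     * Example usage (a minimal sketch with a hypothetical matrix). For square inputs
     * this method works in place, so clone first if the original is still needed. The
     * factors are returned as {Q, R}, and under the usual Householder construction A = Q*R:
     *
     *   DenseMatrix A = new DenseMatrix(new double[][]{{12, -51}, {6, 167}});
     *   Matrix[] qr = A.clone().qr();
     *   Matrix Q = qr[0], R = qr[1];
     */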
private class QRRun implements Runnable
{
DenseMatrix A, Q;
double[] vk;
double TwoOverBeta;
int k, threadID, N, M;
CountDownLatch latch;
public QRRun(DenseMatrix A, DenseMatrix Q, double[] vk, double TwoOverBeta, int k, int threadID, CountDownLatch latch)
{
this.A = A;
this.Q = Q;
this.vk = vk;
this.TwoOverBeta = TwoOverBeta;
this.k = k;
this.threadID = threadID;
this.latch = latch;
this.N = A.rows();
this.M = A.cols();
}
public void run()
{
//Computing Q
{
//We are computing Q' in what we are treating as the column major order, which represents Q in row major order, which is what we want!
for(int j = 0+threadID; j < Q.cols(); j+=LogicalCores)
{
double[] Q_j = Q.matrix[j];
                    double y = 0;//y = vk dot Q_j
for (int i = k; i < Q.cols(); i++)
y += vk[i] * Q_j[i];
y *= TwoOverBeta;
for (int i = k; i < Q.rows(); i++)
{
Q_j[i] -= y*vk[i];
}
}
}
//First run of loop removed, as it will be setting zeros. More accurate to just set them ourselves
if(k < N && threadID == 0)
{
qrUpdateRFirstIteration(A, k, vk, TwoOverBeta, M);
}
            //The rest of the normal loop
for(int j = k+1+threadID; j < N; j+=LogicalCores)
{
double[] A_j = A.matrix[j];
double y = 0;//y = vk dot A_j
for(int i = k; i < A.cols(); i++)
y += vk[i]*A_j[i];
y *= TwoOverBeta;
for(int i = k; i < M; i++)
A_j[i] -= y*vk[i];
}
latch.countDown();
}
}
@Override
public Matrix[] qr(ExecutorService threadPool)
{
int N = cols(), M = rows();
Matrix[] qr = new Matrix[2];
DenseMatrix Q = Matrix.eye(M);
DenseMatrix A;
if(isSquare())
{
mutableTranspose();
A = this;
}
else
A = (DenseMatrix) this.transpose();
double[] vk = new double[M];
int to = cols() > rows() ? M : N;
for(int k = 0; k < to; k++)
{
double[] A_k = A.matrix[k];
double vkNorm = initalVKNormCompute(k, M, vk, A_k);
double beta = vkNorm;
double vk_k = vk[k] = A_k[k];
vkNorm += vk_k*vk_k;
vkNorm = sqrt(vkNorm);
double alpha = -signum(vk_k) * vkNorm;
vk_k -= alpha;
beta += vk_k*vk_k;
vk[k] = vk_k;
if(beta == 0)
continue;
double TwoOverBeta = 2.0/beta;
CountDownLatch latch = new CountDownLatch(LogicalCores);
for(int threadID = 0; threadID < LogicalCores; threadID++)
threadPool.submit(new QRRun(A, Q, vk, TwoOverBeta, k, threadID, latch));
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
qr[0] = Q;
if(isSquare())
{
A.mutableTranspose();
qr[1] = A;
}
else
qr[1] = A.transpose();
return qr;
}
@Override
public DenseMatrix clone()
{
DenseMatrix copy = new DenseMatrix(rows(), cols());
for(int i = 0; i < matrix.length; i++)
System.arraycopy(matrix[i], 0, copy.matrix[i], 0, matrix[i].length);
return copy;
}
}