// JSAT-master/JSAT/src/jsat/linear/DenseVector.java
package jsat.linear;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import static java.lang.Math.*;
import java.util.Arrays;
import java.util.List;
/**
* A vector implementation that is dense, meaning all values are allocated -
* even if their values will be implicitly zero.
*
* @author Edward Raff
*/
public class DenseVector extends Vec
{
private static final long serialVersionUID = -889493251793828933L;
protected double[] array;
private int startIndex;
private int endIndex;
/**
* Creates a new Dense Vector of zeros
* @param length the length of the vector
*/
public DenseVector(int length)
{
if(length < 0)
throw new ArithmeticException("You can not have a negative dimension vector");
array = new double[length];
startIndex = 0;
endIndex = array.length;
}
/**
* Creates a new vector of the length of the given list, and values copied
* over in order.
*
* @param list the list of values to copy into a new vector
*/
public DenseVector(List<Double> list)
{
this.array = new double[list.size()];
for(int i = 0; i < list.size(); i++)
this.array[i] = list.get(i);
startIndex = 0;
endIndex = this.array.length;
}
/**
* Creates a new Dense Vector that uses the given array as its values. Its
* values will not be copied, and raw access and mutations to the given
* array may occur.
*
* @param array the backing array to use for a new vector of the same length
*/
public DenseVector(double[] array)
{
this(array, 0, array.length);
}
/**
* Creates a new Dense Vector that uses the given array as its values. Its
* values will not be copied, and raw access and mutations to the given
* array may occur.
*
* @param array the backing array to use for a new vector
* @param start the first index in the array, inclusive, to mark the start
* of the vector.
* @param end the last index in the array, exclusive, to mark the end of the
* vector.
*/
public DenseVector(double[] array, int start, int end)
{
this.array = array;
this.startIndex = start;
this.endIndex = end;
}
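/* A minimal usage sketch (illustrative, not part of the original source):
* this constructor creates a view over a slice of an existing array, so
* writes through the vector are visible in the backing array.
*
*   double[] data = {0, 1, 2, 3, 4};
*   DenseVector view = new DenseVector(data, 1, 4); // length 3, values {1, 2, 3}
*   view.set(0, 9.0);                               // data[1] is now 9.0
*/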
/**
* Creates a new Dense Vector that contains a copy of the values in the
* given vector
* @param toCopy the vector to copy
*/
public DenseVector(Vec toCopy)
{
this(toCopy.length());
for(IndexValue iv : toCopy)
set(iv.getIndex(), iv.getValue());
}
@Override
public int length()
{
return (endIndex-startIndex);
}
@Override
public double get(int index)
{
return array[index+startIndex];
}
@Override
public void set(int index, double val)
{
array[index+startIndex] = val;
}
@Override
public double min()
{
double result = array[startIndex];
for(int i = startIndex+1; i < endIndex; i++)
result = Math.min(result, array[i]);
return result;
}
@Override
public double max()
{
double result = array[startIndex];
for(int i = startIndex+1; i < endIndex; i++)
result = Math.max(result, array[i]);
return result;
}
@Override
public double sum()
{
/*
* Uses Kahan summation algorithm, which is more accurate than
* naively summing the values in floating point, though it
* does not guarantee the best possible accuracy
*
* See: http://en.wikipedia.org/wiki/Kahan_summation_algorithm
*/
double sum = 0;
double c = 0;
for(int i = startIndex; i < endIndex; i++)
{
double y = array[i] - c;
double t = sum+y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
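/* An illustrative note (not part of the original source): compensated
* summation matters when many rounding errors accumulate. Naively summing
* ten million copies of 0.1 drifts noticeably from the exact sum of the
* stored doubles, while the compensated loop above stays within a few ulps
* of it.
*
*   double[] vals = new double[10000000];
*   Arrays.fill(vals, 0.1);
*   double naive = 0;
*   for (double v : vals)
*       naive += v;                                  // accumulates rounding error
*   double compensated = new DenseVector(vals).sum(); // much closer to 1e6
*/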
@Override
public double median()
{
double[] copy = Arrays.copyOfRange(array, startIndex, endIndex);
Arrays.sort(copy);
if(copy.length % 2 == 1)
return copy[copy.length/2];
else//even length: average the two middle elements
return copy[copy.length/2-1]/2+copy[copy.length/2]/2;//Dividing by 2 before adding is more numerically stable
}
@Override
public double skewness()
{
double mean = mean();
double tmp = 0;
for(int i = startIndex; i < endIndex; i++)
tmp += pow(array[i]-mean, 3);
final int N = length();//use the vector's length, not the backing array's, in case this is a sub-vector view
double s1 = tmp / (pow(standardDeviation(), 3) * (N-1) );
if(N >= 3)//We can use the bias corrected formula
return sqrt(N*(N-1.0))/(N-2)*s1;
}
@Override
public double kurtosis()
{
double mean = mean();
double tmp = 0;
for(int i = startIndex; i < endIndex; i++)
tmp += pow(array[i]-mean, 4);
tmp /= length();
return tmp / pow(variance(), 2) - 3;
}
@Override
public DenseVector sortedCopy()
{
double[] copy = Arrays.copyOfRange(array, startIndex, endIndex);
Arrays.sort(copy);
return new DenseVector(copy);
}
@Override
public double variance()
{
double mu = mean();
double tmp = 0;
double N = length();
for(int i = startIndex; i < endIndex; i++)
tmp += pow(array[i]-mu, 2)/N;
return tmp;
}
@Override
public double dot(Vec v)
{
if(this.length() != v.length())
throw new ArithmeticException("Vectors must have the same length");
if(v.isSparse())
return v.dot(this);
double dot = 0;
for(int i = startIndex; i < endIndex; i++)
dot += array[i] * v.get(i-startIndex);
return dot;
}
public DenseVector deepCopy()
{
return new DenseVector(arrayCopy());//copy only this vector's range, in case this is a sub-vector view
}
@Override
public void multiply(double c, Matrix A, Vec b)
{
if(this.length() != A.rows())
throw new ArithmeticException("Vector x Matrix dimensions do not agree [1," + this.length() + "] x [" + A.rows() + ", " + A.cols() + "]");
if(b.length() != A.cols())
throw new ArithmeticException("Destination vector is not the right size");
for(int i = 0; i < this.length(); i++)
{
double this_i = c*this.array[i+this.startIndex];
for(int j = 0; j < A.cols(); j++)
b.increment(j, this_i*A.get(i, j));
}
}
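/* An illustrative sketch (not part of the original source, and assuming
* some Matrix A with two rows): this method treats the vector as a row
* vector, accumulating b += c * (this' * A).
*
*   DenseVector x = DenseVector.toDenseVec(1.0, 2.0); // length == A.rows()
*   DenseVector b = new DenseVector(A.cols());
*   x.multiply(1.0, A, b);                            // b now holds x' * A
*/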
@Override
public void mutableAdd(double c)
{
for(int i = startIndex; i < endIndex; i++)
array[i] += c;
}
@Override
public void mutableAdd(double c, Vec b)
{
if(this.length() != b.length())
throw new ArithmeticException("Can not add vectors of unequal length");
if (b.isSparse())
for (IndexValue iv : b)
array[iv.getIndex()+startIndex] += c * iv.getValue();
else
for (int i = startIndex; i < endIndex; i++)
array[i] += c * b.get(i-startIndex);
}
@Override
public void mutableSubtract(double c)
{
for(int i = startIndex; i < endIndex; i++)
array[i] -= c;
}
@Override
public void mutableMultiply(double c)
{
for(int i = startIndex; i < endIndex; i++)
array[i] *= c;
}
@Override
public void mutableDivide(double c)
{
for(int i = startIndex; i < endIndex; i++)
array[i] /= c;
}
@Override
public double pNormDist(double p, Vec y)
{
if(this.length() != y.length())
throw new ArithmeticException("Vectors must be of the same length");
double norm = 0;
if(y.isSparse())
{
int lastIndx = -1;
for(IndexValue iv : y)
{
for(int i = lastIndx+1; i < iv.getIndex(); i++)//add all the indices we skipped
norm += Math.pow(Math.abs(array[i+startIndex]), p);
lastIndx = iv.getIndex();
//add current
norm += Math.pow(Math.abs(array[iv.getIndex()+startIndex]-iv.getValue()), p);
}
//Trailing zeros
for(int i = lastIndx+1; i < y.length(); i++)
norm += Math.pow(Math.abs(array[i+startIndex]), p);
}
else
{
for(int i = startIndex; i < endIndex; i++)
norm += Math.pow(Math.abs(array[i]-y.get(i-startIndex)), p);
}
return Math.pow(norm, 1.0/p);
}
@Override
public double pNorm(double p)
{
if (p <= 0)
throw new IllegalArgumentException("norm must be a positive value, not " + p);
double result = 0;
if (p == 1)
{
for (int i = startIndex; i < endIndex; i++)
result += abs(array[i]);
}
else if (p == 2)
{
for(int i = startIndex; i < endIndex; i++)
result += array[i] * array[i];
result = Math.sqrt(result);
}
else if (Double.isInfinite(p))
{
for(int i = startIndex; i < endIndex; i++)
result = Math.max(result, abs(array[i]));
}
else
{
for(int i = startIndex; i < endIndex; i++)
result += Math.pow(Math.abs(array[i]), p);
result = pow(result, 1 / p);
}
return result;
}
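/* An illustrative note (not part of the original source): the branches
* above specialize the common norms.
*
*   DenseVector v = DenseVector.toDenseVec(3.0, -4.0);
*   v.pNorm(1);                        // 7.0, the Manhattan norm
*   v.pNorm(2);                        // 5.0, the Euclidean norm
*   v.pNorm(Double.POSITIVE_INFINITY); // 4.0, the max-magnitude norm
*/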
@Override
public DenseVector clone()
{
DenseVector copy = new DenseVector(length());
System.arraycopy(this.array, startIndex, copy.array, 0, length());
return copy;
}
@Override
public void normalize()
{
double sum = 0;
for(int i = startIndex; i < endIndex; i++)
sum += array[i]*array[i];
sum = Math.sqrt(sum);
mutableDivide(Math.max(sum, 1e-10));
}
@Override
public void mutablePairwiseMultiply(Vec b)
{
if(this.length() != b.length())
throw new ArithmeticException("Vectors must have the same length");
for(int i = startIndex; i < endIndex; i++)
this.array[i] *= b.get(i-startIndex);
}
@Override
public void mutablePairwiseDivide(Vec b)
{
if(this.length() != b.length())
throw new ArithmeticException("Vectors must have the same length");
for(int i = startIndex; i < endIndex; i++)
this.array[i] /= b.get(i-startIndex);
}
@Override
public boolean equals(Object obj)
{
if(!(obj instanceof Vec))
return false;
Vec otherVec = (Vec) obj;
if(this.length() != otherVec.length())
return false;
for(int i = 0; i < length(); i++)
if (this.get(i) != otherVec.get(i))
{
if (Double.isNaN(this.get(i)) && Double.isNaN(otherVec.get(i)))//NaN != NaN is always true, so check the special case and keep going
continue;
return false;
}
return true;
}
@Override
public boolean equals(Object obj, double range)
{
if(!(obj instanceof Vec))
return false;
Vec otherVec = (Vec) obj;
range = Math.abs(range);
if(this.length() != otherVec.length())
return false;
for(int i = 0; i < length(); i++)
if(Math.abs(this.get(i)-otherVec.get(i)) > range)
{
if (Double.isNaN(this.get(i)) && Double.isNaN(otherVec.get(i)))//NaN != NaN is always true, so check the special case and keep going
continue;
return false;
}
return true;
}
/**
* Returns a new dense vector backed by the given array. The array is not
* copied, so the given array should no longer be altered - as doing so will
* affect the values of the dense vector.
*
* @param array the array to use as the backing of a dense vector
* @return a Dense Vector that is backed using the given array
*/
public static DenseVector toDenseVec(double... array)
{
return new DenseVector(array);
}
@Override
public double[] arrayCopy()
{
return Arrays.copyOfRange(array, startIndex, endIndex);
}
@Override
public boolean isSparse()
{
return false;
}
@Override
public void setLength(int newLength)
{
if(newLength < 0)
throw new ArithmeticException("Can not create an array of negative length");
if(newLength > length())
{
array = Arrays.copyOf(array, startIndex + newLength);
endIndex = startIndex + newLength;
}
if(newLength < length())//make sure we aren't destroying anything
{
for(int i = newLength; i < length(); i++)
if(get(i) != 0)
throw new RuntimeException("Can't decrease the length of this vector from " + length() + " to " + newLength + " due to non-zero value");
array = Arrays.copyOfRange(array, startIndex, startIndex + newLength);
startIndex = 0;
endIndex = newLength;
}
}
private void readObject(ObjectInputStream in) throws ClassNotFoundException, IOException
{
this.array = new double[in.readInt()];
this.startIndex = 0;
this.endIndex = this.array.length;
for(int i = 0; i < this.length(); i++)
this.array[i] = in.readDouble();
}
private void writeObject(ObjectOutputStream out) throws IOException
{
out.writeInt(this.length());
for(int i = 0; i < this.length(); i++)
out.writeDouble(this.get(i));
}
}
// JSAT-master/JSAT/src/jsat/linear/EigenValueDecomposition.java
package jsat.linear;
import java.io.Serializable;
import static java.lang.Math.*;
import java.util.Arrays;
import java.util.Comparator;
import jsat.math.Complex;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
/**
* Class for performing the Eigen Value Decomposition of a matrix. The EVD of a
* real matrix may contain complex results. When this occurs, the EVD is less
* useful since JSAT only supports real matrices. The
* {@link SingularValueDecomposition} of a real matrix is always real, and may
* be more useful.
* <br><br>
* Implementation adapted from the Public Domain work
* of <a href="http://math.nist.gov/javanumerics/jama/"> JAMA: A Java Matrix
* Package</a>
* <br><br>
* If A is symmetric, then A = V*D*V' where the eigenvalue
* matrix D is diagonal and the eigenvector matrix V is orthogonal. V * V' equals the
* identity matrix.
* <br><br>
* If A is not symmetric, then the eigenvalue matrix D
* is block diagonal with the real eigenvalues in 1-by-1 blocks and any complex
* eigenvalues, lambda + i*mu, in 2-by-2 blocks, [lambda, mu; -mu, lambda]. The
* columns of V represent the eigenvectors in the sense that A*V = V*D.
* The matrix V may be badly conditioned, or even
* singular, so the validity of the equation A = V*D*inverse(V) depends upon
* the condition of V. <br>
* If there are no complex eigen values, which can be checked using
* {@link #isComplex() }, then D is a normal diagonal matrix.
*
* @author Edward Raff
*/
public class EigenValueDecomposition implements Serializable
{
private static final long serialVersionUID = -7169205761148043008L;
/**
* Row and column dimension (square matrix).
*
* @serial matrix dimension.
*/
private int n;
/**
* Arrays for internal storage of eigenvalues.
*
* @serial internal storage of eigenvalues.
*/
private double[] d, e;
/**
* Array for internal storage of eigenvectors.
*
* @serial internal storage of eigenvectors.
*/
private Matrix V;
/**
* Array for internal storage of nonsymmetric Hessenberg form.
*
* @serial internal storage of nonsymmetric Hessenberg form.
*/
private Matrix H;
/**
* Used to indicate if the result contains complex eigen values
*/
private boolean complexResult;
/**
* Symmetric Householder reduction to tridiagonal form.
*/
private void tred2()
{
for(int j = 0; j < n; j++)
d[j] = V.get(n-1, j);
// Householder reduction to tridiagonal form.
for (int i = n - 1; i > 0; i--)
{
// Scale to avoid under/overflow.
double scale = 0.0;
double h = 0.0;
for (int k = 0; k < i; k++)
{
scale = scale + abs(d[k]);
}
if (scale == 0.0)
{
e[i] = d[i - 1];
for (int j = 0; j < i; j++)
{
d[j] = V.get(i-1, j);
V.set(i, j, 0.0);
V.set(j, i, 0.0);
}
}
else
{
// Generate Householder vector.
for (int k = 0; k < i; k++)
{
d[k] /= scale;
h += d[k] * d[k];
}
double f = d[i - 1];
double g = sqrt(h);
if (f > 0)
g = -g;
e[i] = scale * g;
h -= f * g;
d[i - 1] = f - g;
Arrays.fill(e, 0, i, 0.0);
// Apply similarity transformation to remaining columns.
for (int j = 0; j < i; j++)
{
f = d[j];
V.set(j, i, f);
g = e[j] + V.get(j, j) * f;
for (int k = j + 1; k <= i - 1; k++)
{
g += V.get(k,j) * d[k];
e[k] += V.get(k, j) * f;
}
e[j] = g;
}
f = 0.0;
for (int j = 0; j < i; j++)
{
e[j] /= h;
f += e[j] * d[j];
}
double hh = f / (h + h);
for (int j = 0; j < i; j++)
{
e[j] -= hh * d[j];
}
for (int j = 0; j < i; j++)
{
f = d[j];
g = e[j];
for (int k = j; k <= i - 1; k++)
{
V.increment(k, j, -(f * e[k] + g * d[k]));
}
d[j] = V.get(i-1, j);
V.set(i, j, 0.0);
}
}
d[i] = h;
}
// Accumulate transformations.
for (int i = 0; i < n - 1; i++)
{
V.set(n-1, i, V.get(i, i));
V.set(i, i, 1.0);
double h = d[i + 1];
if (h != 0.0)
{
for (int k = 0; k <= i; k++)
{
d[k] = V.get(k, i+1) / h;
}
for (int j = 0; j <= i; j++)
{
double g = 0.0;
for (int k = 0; k <= i; k++)
{
g += V.get(k, i+1) * V.get(k, j);
}
RowColumnOps.addMultCol(V, j, 0, i+1, -g, d);
}
}
RowColumnOps.fillCol(V, i+1, 0, i+1, 0.0);
}
for (int j = 0; j < n; j++)
{
d[j] = V.get(n-1, j);
V.set(n-1, j, 0.0);
}
V.set(n-1, n-1, 1.0);
e[0] = 0.0;
}
/**
* Symmetric tridiagonal QL algorithm.
*/
private void tql2()
{
// This is derived from the Algol procedures tql2, by
// Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
// Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutine in EISPACK.
for (int i = 1; i < n; i++)
{
e[i - 1] = e[i];
}
e[n - 1] = 0.0;
double f = 0.0;
double tst1 = 0.0;
double eps = pow(2.0, -52.0);
for (int l = 0; l < n; l++)
{
// Find small subdiagonal element
tst1 = max(tst1, abs(d[l]) + abs(e[l]));
int m = l;
while (m < n)
{
if (abs(e[m]) <= eps * tst1)
{
break;
}
m++;
}
// If m == l, d[l] is an eigenvalue,
// otherwise, iterate.
if (m > l)
{
int iter = 0;
do
{
iter = iter + 1; // (Could check iteration count here.)
// Compute implicit shift
double g = d[l];
double p = (d[l + 1] - g) / (2.0 * e[l]);
double r = hypot(p, 1.0);
if (p < 0)
{
r = -r;
}
d[l] = e[l] / (p + r);
d[l + 1] = e[l] * (p + r);
double dl1 = d[l + 1];
double h = g - d[l];
for (int i = l + 2; i < n; i++)
{
d[i] -= h;
}
f = f + h;
// Implicit QL transformation.
p = d[m];
double c = 1.0;
double c2 = c;
double c3 = c;
double el1 = e[l + 1];
double s = 0.0;
double s2 = 0.0;
for (int i = m - 1; i >= l; i--)
{
c3 = c2;
c2 = c;
s2 = s;
g = c * e[i];
h = c * p;
r = hypot(p, e[i]);
e[i + 1] = s * r;
s = e[i] / r;
c = p / r;
p = c * d[i] - s * g;
d[i + 1] = h + s * (c * g + s * d[i]);
// Accumulate transformation.
columnOpTransform(V, 0, n - 1, i, c, s, 1);
}
p = -s * s2 * c3 * el1 * e[l] / dl1;
e[l] = s * p;
d[l] = c * p;
// Check for convergence.
}
while (abs(e[l]) > eps * tst1);
}
d[l] = d[l] + f;
e[l] = 0.0;
}
// Sort eigenvalues and corresponding vectors.
for (int i = 0; i < n - 1; i++)
{
int k = i;
double p = d[i];
for (int j = i + 1; j < n; j++)
{
if (d[j] < p)
{
k = j;
p = d[j];
}
}
if (k != i)
{
d[k] = d[i];
d[i] = p;
RowColumnOps.swapCol(V, i, k);
}
}
}
/**
* Nonsymmetric reduction to Hessenberg form.
*/
private void orthes()
{
final double[] ort = new double[n];
// This is derived from the Algol procedures orthes and ortran,
// by Martin and Wilkinson, Handbook for Auto. Comp.,
// Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutines in EISPACK.
int low = 0;
int high = n - 1;
for (int m = low + 1; m <= high - 1; m++)
{
// Scale column.
double scale = 0.0;
for (int i = m; i <= high; i++)
scale = scale + abs(H.get(i, m-1));
if (scale != 0.0)
{
// Compute Householder transformation.
double h = 0.0;
double tmp;
for (int i = high; i >= m; i--)
{
ort[i] = tmp = H.get(i, m-1) / scale;
h += tmp*tmp;
}
double g = sqrt(h);
if ((tmp = ort[m]) > 0)
g = -g;
h = h - tmp * g;
ort[m] = tmp - g;
orthesApplyHouseholder(m, high, ort, h);
ort[m] *= scale;
H.set(m, m-1, scale*g);
}
}
// Accumulate transformations (Algol's ortran).
for (int j = 0; j < n; j++)
{
for (int i = 0; i < n; i++)
{
V.set(i, j, (i == j ? 1.0 : 0.0));
}
}
orthesAccumulateTransforamtions(high, low, ort);
}
/**
* Nonsymmetric reduction from Hessenberg to real Schur form.
*/
private void hqr2()
{
// This is derived from the Algol procedure hqr2,
// by Martin and Wilkinson, Handbook for Auto. Comp.,
// Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutine in EISPACK.
// Initialize
int nn = this.n;
int n = nn - 1;
int low = 0;
int high = nn - 1;
double eps = pow(2.0, -52.0);
double exshift = 0.0;
double p = 0, q = 0, r = 0, s = 0, z = 0, t, w, x, y;
/**
* Output from complex division
*/
final double[] cr = new double[2];
double norm = hqr2GetNormStart(nn, low, high);
// Outer loop over eigenvalue index
int iter = 0;
while (n >= low)
{
// Look for single small sub-diagonal element
int l = n;
while (l > low)
{
s = abs(H.get(l-1, l-1)) + abs(H.get(l, l));
if (s == 0.0)
{
s = norm;
}
if (abs(H.get(l, l-1)) < eps * s)
{
break;
}
l--;
}
// Check for convergence
// One root found
if (l == n)
{
H.increment(n, n, exshift);
d[n] = H.get(n, n);
e[n] = 0.0;
n--;
iter = 0;
}
else if (l == n - 1) // Two roots found
{
hqr2FoundTwoRoots(exshift, n, nn, low, high);
n = n - 2;
iter = 0;
// No convergence yet
}
else
{
// Form shift
x = H.get(n, n);
y = 0.0;
w = 0.0;
if (l < n)
{
y = H.get(n-1, n-1);
w = pow(H.get(n, n-1), 2);
}
// Wilkinson's original ad hoc shift
if (iter == 10)
{
exshift += x;
RowColumnOps.addDiag(H, low, n+1, -x);
s = abs(H.get(n, n-1)) + abs(H.get(n-1, n-2));
x = y = 0.75 * s;
w = -0.4375 * s * s;
}
// MATLAB's new ad hoc shift
if (iter == 30)
{
s = (y - x) / 2.0;
s = s * s + w;
if (s > 0)
{
s = sqrt(s);
if (y < x)
{
s = -s;
}
s = x - w / ((y - x) / 2.0 + s);
RowColumnOps.addDiag(H, low, n+1, -s);
exshift += s;
x = y = w = 0.964;
}
}
iter = iter + 1; // (Could check iteration count here.)
// Look for two consecutive small sub-diagonal elements
int m = n - 2;
while (m >= l)
{
z = H.get(m, m);
r = x - z;
s = y - z;
p = (r * s - w) / H.get(m+1, m) + H.get(m, m+1);
q = H.get(m+1, m+1) - z - r - s;
r = H.get(m+2, m+1);
s = abs(p) + abs(q) + abs(r);
p = p / s;
q = q / s;
r = r / s;
if (m == l)
{
break;
}
if (abs(H.get(m, m-1)) * (abs(q) + abs(r))
< eps * (abs(p) * (abs(H.get(m-1, m-1)) + abs(z)
+ abs(H.get(m+1, m+1)))))
{
break;
}
m--;
}
for (int i = m + 2; i <= n; i++)
{
H.set(i, i-2, 0.0);
if (i > m + 2)
{
H.set(i, i-3, 0.0);
}
}
// Double QR step involving rows l:n and columns m:n
for (int k = m; k <= n - 1; k++)
{
boolean notlast = (k != n - 1);
if (k != m)
{
p = H.get(k, k-1);
q = H.get(k+1, k-1);
r = (notlast ? H.get(k+2, k-1) : 0.0);
x = abs(p) + abs(q) + abs(r);
if (x != 0.0)
{
p = p / x;
q = q / x;
r = r / x;
}
}
if (x == 0.0)
break;
s = sqrt(p * p + q * q + r * r);
if (p < 0)
{
s = -s;
}
if (s != 0)
{
if (k != m)
{
H.set(k, k-1, -s*x);
}
else if (l != m)
{
H.set(k, k-1, -H.get(k, k-1));
}
p = p + s;
x = p / s;
y = q / s;
z = r / s;
q = q / p;
r = r / p;
// Row modification
rowOpTransform2(H, k, nn - 1, x, k, y, notlast, z, r, q);
// Column modification
columnOpTransform2(H, 0, min(n, k + 3), x, k, y, notlast, z, r, q);
// Accumulate transformations
columnOpTransform2(V, low, high, x, k, y, notlast, z, r, q);
} // (s != 0)
} // k loop
} // check convergence
} // while (n >= low)
// Backsubstitute to find vectors of upper triangular form
if (norm == 0.0)
return;
backsubtituteFindVectors(nn, z, s, eps, norm, cr);
// Vectors of isolated roots
for (int i = 0; i < nn; i++)
if (i < low | i > high)
{
for(int j = i; j < nn-1; j++)
H.set(i, j, V.get(i, j));
}
backtransform(nn, low, high);
}
/**
* Creates a new Eigen Value Decomposition. The input matrix will not be
* altered. If the input is symmetric, a more efficient algorithm will be
* used.
*
* @param A the square matrix to work on.
*/
public EigenValueDecomposition(Matrix A)
{
this(A, 1e-15);
}
/**
* Creates a new Eigen Value Decomposition. The input matrix will not be
* altered. If the input is symmetric, a more efficient algorithm will be
* used.
*
* @param A the square matrix to work on.
* @param eps the numerical tolerance for differences in value to be
* considered the same.
*/
public EigenValueDecomposition(Matrix A, double eps)
{
if (!A.isSquare())
throw new ArithmeticException("The input matrix must be square");
n = A.cols();
d = new double[n];
e = new double[n];
if (Matrix.isSymmetric(A, eps) )
{
//Would give it the transpose, but the input is symmetric. So it's the same thing
Matrix VWork = A.clone();
V = new TransposeView(VWork);
// Tridiagonalize.
tred2();
// Diagonalize.
tql2();
V = VWork.transpose();//Place back
complexResult = false;
}
else
{
Matrix HWork = A.transpose();
H = new TransposeView(HWork);
Matrix VWork = new DenseMatrix(n, n);
V = new TransposeView(VWork);
// Reduce to Hessenberg form.
orthes();
// Reduce Hessenberg to real Schur form.
hqr2();
complexResult = false;
//Check if the result has complex eigen values
for (int i = 0; i < n; i++)
if (e[i] != 0)
complexResult = true;
V = VWork.transpose();
}
}
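/* A usage sketch (illustrative, not part of the original source, assuming
* some square Matrix A): for a symmetric A the decomposition satisfies
* A = V * D * V' with an orthogonal V.
*
*   EigenValueDecomposition evd = new EigenValueDecomposition(A);
*   if (!evd.isComplex())
*   {
*       Matrix V = evd.getV();
*       Matrix D = evd.getD();
*       // V.multiply(D).multiply(evd.getVT()) approximately reconstructs A
*   }
*/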
/**
* Sorts the eigen values and the corresponding eigenvector columns by the
* associated eigen value. Sorting can not occur if complex values are
* present.
* @param cmp the comparator to use to sort the eigen values
*/
public void sortByEigenValue(Comparator<Double> cmp)
{
if(isComplex())
throw new ArithmeticException("Eigen values can not be sorted due to complex results");
IndexTable it = new IndexTable(DoubleList.unmodifiableView(d, d.length), cmp);
for(int i = 0; i < d.length; i++)
{
RowColumnOps.swapCol(V, i, it.index(i));
double tmp = d[i];
d[i] = d[it.index(i)];
d[it.index(i)] = tmp;
it.swap(i, it.index(i));
}
}
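/* A usage sketch (illustrative, not part of the original source): sort the
* spectrum in descending order of eigenvalue, keeping the columns of V
* aligned with their eigenvalues.
*
*   evd.sortByEigenValue(java.util.Collections.reverseOrder());
*/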
/**
* Return a copy of the eigenvector matrix
*
* @return the eigen vector matrix
*/
public Matrix getV()
{
return V.clone();
}
/**
* Returns the raw eigenvector matrix. Modifying this matrix will effect
* others using the same matrix.
* @return the eigen vector matrix
*/
public Matrix getVRaw()
{
return V;
}
/**
* Returns a copy of the transposed eigenvector matrix.
* @return the transposed eigenvector matrix
*/
public Matrix getVT() {
return V.transpose();
}
/**
* Return the real parts of the eigenvalues
*
* @return real(diag(D))
*/
public double[] getRealEigenvalues()
{
return d;
}
/**
* Return the imaginary parts of the eigenvalues
*
* @return imag(diag(D))
*/
public double[] getImagEigenvalues()
{
return e;
}
/**
* Updates the columns of the matrix M such that <br><br>
* <code><br>
* for (int i = low; i <= high; i++)<br>
* {<br>
* z = M[i][n+shift];<br>
* M[i][n+shift] = q * z + p * M[i][n];<br>
* M[i][n] = q * M[i][n] - p * z;<br>
* }<br>
* </code>
*
* @param M the matrix to alter
* @param low the starting column (inclusive)
* @param high the ending column (inclusive)
* @param n the column to alter, and the preceding column will be altered as
* well
* @param q first constant
* @param p second constant
* @param shift the direction to perform the computation. Either 1 for after
* the current column, or -1 for before the current column.
*/
private static void columnOpTransform(Matrix M, int low, int high, int n, double q, double p, int shift)
{
double z;
for (int i = low; i <= high; i++)
{
z = M.get(i, n+shift);
M.set(i, n+shift, q * z + p * M.get(i, n));
M.set(i, n, q * M.get(i, n) - p * z);
}
}
/**
* Updates the rows of the matrix M such that
* <br>
* M[n-1][j] = q * M[n-1][j] + p * M[n][j] <br>
* simultaneously altering <br>
* M[n][j] = q * M[n][j] - p * M[n-1][j] <br>
* as if M[n-1][j] had not been altered
*
* @param M the matrix to alter
* @param low the starting row (inclusive)
* @param high the ending row (inclusive)
* @param n the row to alter, and the preceding row will be altered as well
* @param q the first constant
* @param p the second constant
*/
private static void rowOpTransform(Matrix M, int low, int high, int n, double q, double p)
{
double z;
for (int j = low; j <= high; j++)
{
z = M.get(n-1, j);
M.set(n - 1, j, q * z + p * M.get(n, j));
M.set(n, j, q * M.get(n, j) - p * z);
}
}
/**
* Alters the columns according to <br>
* <code><p>
* for (int i = low; i <= high; i++)<br>
* {<br>
* p = x * M[i][k] + y * M[i][k + 1];<br>
* if (notlast)<br>
* {<br>
* p = p + z * M[i][k + 2];<br>
* M[i][k + 2] = M[i][k + 2] - p * r;<br>
* }<br>
* M[i][k] = M[i][k] - p;<br>
* M[i][k + 1] = M[i][k + 1] - p * q;<br>
* }<br>
* </p></code>
*
*
* @param M the matrix to alter
* @param low the starting column (inclusive)
* @param high the ending column (inclusive)
* @param x first constant
* @param k this column and the column after will be altered
* @param y second constant
* @param notlast <tt>true</tt> if the 2nd column after <tt>k</tt> should be updated
* @param z third constant
* @param r fourth constant
* @param q fifth constant
*/
private void columnOpTransform2(Matrix M, int low, int high, double x, int k, double y, boolean notlast, double z, double r, double q)
{
double p;
for (int i = low; i <= high; i++)
{
p = x * M.get(i, k) + y * M.get(i, k+1);
if (notlast)
{
p = p + z * M.get(i, k+2);
M.set(i, k + 2, M.get(i, k+2) - p * r);
}
M.increment(i, k, -p);
M.increment(i, k+1, -p*q);
}
}
/**
* Alters the rows of the matrix M according to
* <code><br>
* for (int j = low; j <= high; j++)
* {<br>
* p = M[k][j] + q * M[k + 1][j];<br>
* if (notlast)<br>
* {<br>
* p = p + r * M[k + 2][j];<br>
* M[k + 2][j] = M[k + 2][j] - p * z;<br>
* }<br>
* M[k][j] = M[k][j] - p * x;<br>
* M[k + 1][j] = M[k + 1][j] - p * y;<br>
* }
* </code>
* @param M the matrix to alter
* @param low the starting column (inclusive)
* @param high the ending column (inclusive)
* @param x first constant
* @param k this row and the row after will be altered
* @param y second constant
* @param notlast <tt>true</tt> if the 2nd row after <tt>k</tt> should be updated
* @param z third constant
* @param r fourth constant
* @param q fifth constant
*/
private void rowOpTransform2(Matrix M, int low, int high, double x, int k, double y, boolean notlast, double z, double r, double q)
{
double p;
for (int j = low; j <= high; j++)
{
p = M.get(k, j) + q * M.get(k + 1,j);
if (notlast)
{
p = p + r * M.get(k + 2,j);
M.set(k + 2,j, M.get(k+2, j) - p * z);
}
M.increment(k, j, -p*x);
M.increment(k+1, j, -p*y);
}
}
/**
* Return the block diagonal eigenvalue matrix
*
* @return D
*/
public Matrix getD()
{
Matrix X = new DenseMatrix(n, n);
for (int i = 0; i < n; i++)
{
X.set(i, i, d[i]);
if (e[i] > 0)
X.set(i, i+1, e[i]);
else if (e[i] < 0)
X.set(i, i-1, e[i]);
}
return X;
}
/**
* Indicates whether or not the EVD contains complex eigen values. Because
* EVD works with real matrices, the complex eigen vectors are lost - and
* the complex eigen values are in the off diagonal spaces of the D matrix.
*
* @return <tt>true</tt> if the EVD results in complex eigen values.
*/
public boolean isComplex()
{
return complexResult;
}
private void hqr2SolveComplexEigenEquation(final int i, final double p,
final double q, final double eps,
final double norm, final double w,
final double z, final double r,
final double ra, final double sa,
final double s, final double[] cr,
final int n)
{
double x;
double y;
double vr;
double vi;
// Solve complex equations
x = H.get(i, i+1);
y = H.get(i+1, i);
vr = (d[i] - p) * (d[i] - p) + e[i] * e[i] - q * q;
vi = (d[i] - p) * 2.0 * q;
if (vr == 0.0 & vi == 0.0)
{
vr = eps * norm * (abs(w) + abs(q)
+ abs(x) + abs(y) + abs(z));
}
Complex.cDiv(x * r - z * ra + q * sa, x * s - z * sa - q * ra, vr, vi, cr);
H.set(i, n-1, cr[0]);
H.set(i, n, cr[1]);
if (abs(x) > (abs(z) + abs(q)))
{
H.set(i+1, n-1, (-ra - w * H.get(i, n-1) + q * H.get(i, n)) / x);
H.set(i+1, n, (-sa - w * H.get(i, n) - q * H.get(i, n-1)) / x);
}
else
{
Complex.cDiv(-r - y * H.get(i, n-1), -s - y * H.get(i, n), z, q, cr);
H.set(i+1, n-1, cr[0]);
H.set(i+1, n, cr[1]);
}
}
private void backsubtituteFindVectors(int nn, double z, double s, double eps, double norm, final double[] cr)
{
double p;
double q;
double w;
double r = 0;
double x;
double y;
double t;
for (int n = nn - 1; n >= 0; n--)
{
p = d[n];
q = e[n];
// Real vector
if (q == 0)
{
int l = n;
H.set(n, n, 1.0);
for (int i = n - 1; i >= 0; i--)
{
w = H.get(i, i) - p;
r = 0.0;
for (int j = l; j <= n; j++)
{
r = r + H.get(i, j) * H.get(j, n);
}
if (e[i] < 0.0)
{
z = w;
s = r;
}
else
{
l = i;
if (e[i] == 0.0)
{
if (w != 0.0)
{
H.set(i, n, -r / w);
}
else
{
H.set(i, n, -r/(eps*norm));
}
// Solve real equations
}
else
{
x = H.get(i, i+1);
y = H.get(i+1, i);
q = (d[i] - p) * (d[i] - p) + e[i] * e[i];
t = (x * s - z * r) / q;
H.set(i, n, t);
if (abs(x) > abs(z))
{
H.set(i+1, n, (-r-w*t)/x);
}
else
{
H.set(i+1, n, (-s - y * t) / z);
}
}
// Overflow control
t = abs(H.get(i, n));
if ((eps * t) * t > 1)
{
RowColumnOps.divCol(H, n, t);
}
}
}
// Complex vector
}
else if (q < 0)
{
int l = n - 1;
// Last vector component imaginary so matrix is triangular
if (abs(H.get(n, n-1)) > abs(H.get(n-1, n)))
{
H.set(n-1, n-1, q / H.get(n, n-1));
H.set(n-1, n, -(H.get(n, n) - p) / H.get(n, n-1));
}
else
{
Complex.cDiv(0.0, -H.get(n-1, n), H.get(n-1, n-1) - p, q, cr);
H.set(n-1, n-1, cr[0]);
H.set(n-1, n, cr[1]);
}
H.set(n, n-1, 0.0);
H.set(n, n, 1.0);
for (int i = n - 2; i >= 0; i--)
{
double ra, sa, vr, vi;
ra = 0.0;
sa = 0.0;
for (int j = l; j <= n; j++)
{
ra = ra + H.get(i, j) * H.get(j, n-1);
sa = sa + H.get(i, j) * H.get(j, n);
}
w = H.get(i, i) - p;
if (e[i] < 0.0)
{
z = w;
r = ra;
s = sa;
}
else
{
l = i;
if (e[i] == 0)
{
Complex.cDiv(-ra, -sa, w, q, cr);
H.set(i, n-1, cr[0]);
H.set(i, n, cr[1]);
}
else
{
hqr2SolveComplexEigenEquation(i, p, q, eps, norm, w, z, r, ra, sa, s, cr, n);
}
// Overflow control
t = max(abs(H.get(i, n-1)), abs(H.get(i, n)));
if ((eps * t) * t > 1)
{
RowColumnOps.multCol(H, n-1, i, n+1, (1/t));
RowColumnOps.multCol(H, n , i, n+1, (1/t));
}
}
}
}
}
}
private double hqr2GetNormStart(int nn, int low, int high)
{
// Store roots isolated by balanc and compute matrix norm
double norm = 0.0;
for (int i = 0; i < nn; i++)
{
if (i < low | i > high)
{
d[i] = H.get(i, i);
e[i] = 0.0;
}
for (int j = max(i - 1, 0); j < nn; j++)
{
norm = norm + abs(H.get(i, j));
}
}
return norm;
}
private void backtransform(int nn, int low, int high)
{
double z;
// Back transformation to get eigenvectors of original matrix
for (int j = nn - 1; j >= low; j--)
{
for (int i = low; i <= high; i++)
{
z = 0.0;
for (int k = low; k <= min(j, high); k++)
{
z = z + V.get(i, k) * H.get(k, j);
}
V.set(i, j, z);
}
}
}
private void hqr2FoundTwoRoots(double exshift, int n, int nn, int low, int high)
{
double w, p, q, z, x, s, r;
w = H.get(n, n - 1) * H.get(n - 1, n);
p = (H.get(n - 1, n - 1) - H.get(n, n)) / 2.0;
q = p * p + w;
z = sqrt(abs(q));
H.increment(n, n, exshift);
H.increment(n - 1, n - 1, exshift);
x = H.get(n, n);
// Real pair
if (q >= 0)
{
if (p >= 0)
z = p + z;
else
z = p - z;
d[n - 1] = x + z;
d[n] = d[n - 1];
if (z != 0.0)
d[n] = x - w / z;
e[n - 1] = 0.0;
e[n] = 0.0;
x = H.get(n, n - 1);
s = abs(x) + abs(z);
p = x / s;
q = z / s;
r = sqrt(p * p + q * q);
p = p / r;
q = q / r;
// Row modification
rowOpTransform(H, n - 1, nn - 1, n, q, p);
// Column modification
columnOpTransform(H, 0, n, n, q, p, -1);
// Accumulate transformations
columnOpTransform(V, low, high, n, q, p, -1);
}
else // Complex pair
{
d[n - 1] = x + p;
d[n] = x + p;
e[n - 1] = z;
e[n] = -z;
}
}
private void orthesAccumulateTransforamtions(int high, int low, final double[] ort)
{
for (int m = high - 1; m >= low + 1; m--)
{
if (H.get(m, m-1) != 0.0)
{
for (int i = m + 1; i <= high; i++)
{
ort[i] = H.get(i, m-1);
}
for (int j = m; j <= high; j++)
{
double g = 0.0;
for (int i = m; i <= high; i++)
{
g += ort[i] * V.get(i, j);
}
// Double division avoids possible underflow
g = (g / ort[m]) / H.get(m, m-1);
RowColumnOps.addMultCol(V, j, m, high+1, g, ort);
}
}
}
}
private void orthesApplyHouseholder(int m, int high, final double[] ort, double h)
{
// Apply Householder similarity transformation
// H = (I-u*u'/h)*H*(I-u*u'/h)
for (int j = m; j < n; j++)
{
double f = 0.0;
for(int i = m; i <= high; i++)
{
f += ort[i] * H.get(i, j);
}
f /= h;
RowColumnOps.addMultCol(H, j, m, high+1, -f, ort);
}
for (int i = 0; i <= high; i++)
{
double f = 0.0;
for(int j = m; j <= high; j++)
{
f += ort[j] * H.get(i, j);
}
f/= h;
RowColumnOps.addMultRow(H, i, m, high+1, -f, ort);
}
}
}
// JSAT-master/JSAT/src/jsat/linear/GenericMatrix.java
package jsat.linear;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.List;
import java.util.ArrayList;
import java.util.concurrent.Callable;
import jsat.utils.FakeExecutor;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import static jsat.utils.SystemInfo.*;
import static java.lang.Math.*;
/**
* This Class provides default implementations of almost all functions in row major form.
* Only a small portion must be implemented by the extending class
*
* @author Edward Raff
*/
public abstract class GenericMatrix extends Matrix
{
private static final long serialVersionUID = -8173419025024676713L;
/**
* Step size chosen such that the computation accesses 2*NB2^2 * dataTypeSize
* bytes of data, so that the data being worked on fits into the L2 cache
*/
protected static int NB2 = (int) sqrt(L2CacheSize/(8.0*2.0));
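/* A sketch of the blocking arithmetic above (illustrative, not part of the
* original source): two NB2 x NB2 blocks of doubles occupy
* 2 * NB2^2 * 8 bytes, and requiring 2 * NB2^2 * 8 <= L2CacheSize gives
* NB2 <= sqrt(L2CacheSize / 16), which is the expression used.
*/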
/**
* Creates a new matrix of the same type
* @param rows the number of rows for the matrix to have
* @param cols the number of columns for the matrix to have
* @return the empty all zero new matrix
*/
abstract protected Matrix getMatrixOfSameType(int rows, int cols);
@Override
public void mutableAdd(double c, Matrix b)
{
if(!sameDimensions(this, b))
throw new ArithmeticException("Matrix dimensions do not agree");
for(int i = 0; i < rows(); i++)
for(int j = 0; j < cols(); j++)
increment(i, j, c*b.get(i, j));
}
@Override
public void mutableAdd(final double c, final Matrix b, ExecutorService threadPool)
{
if(!sameDimensions(this, b))
throw new ArithmeticException("Matrix dimensions do not agree");
final CountDownLatch latch = new CountDownLatch(LogicalCores);
for(int threadId = 0; threadId < LogicalCores; threadId++)
{
final int ID = threadId;
threadPool.submit(new Runnable() {
public void run()
{
for(int i = 0+ID; i < rows(); i+=LogicalCores)
for(int j = 0; j < cols(); j++)
increment(i, j, c*b.get(i, j));
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public void mutableAdd(double c)
{
for(int i = 0; i < rows(); i++)
for(int j = 0; j < cols(); j++)
increment(i, j, c);
}
@Override
public void mutableAdd(final double c, ExecutorService threadPool)
{
final CountDownLatch latch = new CountDownLatch(LogicalCores);
for (int threadId = 0; threadId < LogicalCores; threadId++)
{
final int ID = threadId;
threadPool.submit(new Runnable()
{
public void run()
{
for (int i = 0 + ID; i < rows(); i += LogicalCores)
for (int j = 0; j < cols(); j++)
increment(i, j, c);
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public void multiply(Vec b, double z, Vec c)
{
if(this.cols() != b.length())
throw new ArithmeticException("Matrix dimensions do not agree, [" + rows() +"," + cols() + "] x [" + b.length() + ",1]" );
if(this.rows() != c.length())
throw new ArithmeticException("Target vector dimension does not agree with matrix dimensions. Matrix has " + rows() + " rows but tagert has " + c.length());
if (b.isSparse())
{
for (int i = 0; i < rows(); i++)
{
double dot = 0;
for(IndexValue iv : b)
dot += this.get(i, iv.getIndex()) * iv.getValue();
c.increment(i, dot * z);
}
}
else
{
for (int i = 0; i < rows(); i++)
{
double dot = 0;
for (int j = 0; j < cols(); j++)
dot += this.get(i, j) * b.get(j);
c.increment(i, dot * z);
}
}
}
@Override
public void multiply(Matrix b, Matrix C)
{
if(!canMultiply(this, b))
throw new ArithmeticException("Matrix dimensions do not agree: [" + this.rows() + ", " + this.cols() + "] * [" + b.rows() + ", " + b.cols() + "]");
else if (this.rows() != C.rows() || b.cols() != C.cols())
throw new ArithmeticException("Target Matrix is no the correct size");
/*
* Instead of row echelon order (i, j, k), we compute in "pure row oriented" order, see
* Data structures in Java for matrix computations
* CONCURRENCY AND COMPUTATION: PRACTICE AND EXPERIENCE
* Concurrency Computat.: Pract. Exper. 2004; 16:799–815 (DOI: 10.1002/cpe.793)
*/
for (int i = 0; i < C.rows(); i++)
for (int k = 0; k < this.cols(); k++)
{
double a = this.get(i, k);
for (int j = 0; j < C.cols(); j++)
C.increment(i, j, a * b.get(k, j));
}
}
@Override
public void multiplyTranspose(Matrix b, Matrix C)
{
if(this.cols() != b.cols())
throw new ArithmeticException("Matrix dimensions do not agree");
else if (this.rows() != C.rows() || b.rows() != C.cols())
throw new ArithmeticException("Target Matrix is no the correct size");
final int iLimit = this.rows();
final int jLimit = b.rows();
final int kLimit = this.cols();
for (int i0 = 0; i0 < iLimit; i0 += NB2)
for (int j0 = 0; j0 < jLimit; j0 += NB2)
for (int k0 = 0; k0 < kLimit; k0 += NB2)
for (int i = i0; i < min(i0 + NB2, iLimit); i++)
for (int j = j0; j < min(j0 + NB2, jLimit); j++)
{
double C_ij = 0;
for (int k = k0; k < min(k0 + NB2, kLimit); k++)
C_ij += this.get(i, k) * b.get(j, k);
C.increment(i, j, C_ij);
}
}
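/* A usage sketch (illustrative, not part of the original source, assuming
* Matrix instances A (m x k) and B (n x k)): computes C = A * B' without
* materializing the transpose of B. The target matrix must start zeroed,
* since the method accumulates into it.
*
*   Matrix C = new DenseMatrix(A.rows(), B.rows());
*   A.multiplyTranspose(B, C); // C now holds A * B'
*/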
@Override
public void multiplyTranspose(final Matrix b, final Matrix C, ExecutorService threadPool)
{
if(this.cols() != b.cols())
throw new ArithmeticException("Matrix dimensions do not agree");
else if (this.rows() != C.rows() || b.rows() != C.cols())
throw new ArithmeticException("Destination matrix does not have matching dimensions");
final Matrix A = this;
///Should choose step size such that 2*NB2^2 * dataTypeSize <= CacheSize
final int iLimit = this.rows();
final int jLimit = b.rows();
final int kLimit = this.cols();
final int blockStep = Math.min(NB2, Math.max(iLimit/LogicalCores, 1));//reduce block size so we can use all cores if needed.
final CountDownLatch cdl = new CountDownLatch(LogicalCores);
for(int threadNum = 0; threadNum < LogicalCores; threadNum++)
{
final int threadID = threadNum;
threadPool.submit(new Runnable() {
@Override
public void run()
{
for (int i0 = blockStep * threadID; i0 < iLimit; i0 += blockStep * LogicalCores)
for (int k0 = 0; k0 < kLimit; k0 += blockStep)
for (int j0 = 0; j0 < jLimit; j0 += blockStep)
for (int i = i0; i < min(i0 + blockStep, iLimit); i++)
for (int j = j0; j < min(j0 + blockStep, jLimit); j++)
{
double C_ij = 0;
for (int k = k0; k < min(k0 + blockStep, kLimit); k++)
C_ij += A.get(i, k) * b.get(j, k);
C.increment(i, j, C_ij);
}
cdl.countDown();
}
});
}
try
{
cdl.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public void multiply(final Matrix b, final Matrix C, ExecutorService threadPool)
{
if(!canMultiply(this, b))
throw new ArithmeticException("Matrix dimensions do not agree");
else if(this.rows() != C.rows() || b.cols() != C.cols())
throw new ArithmeticException("Destination matrix does not match the multiplication dimensions");
final CountDownLatch cdl = new CountDownLatch(LogicalCores);
final Matrix A = this;
if(this.rows()/NB2 >= LogicalCores)//Perform block execution only when we have a large enough matrix to keep every core busy!
{
final int kLimit = A.cols();
final int jLimit = C.cols();
final int iLimit = C.rows();
for (int threadID = 0; threadID < LogicalCores; threadID++)
{
final int ID = threadID;
threadPool.submit(new Runnable()
{
public void run()
{
for (int i0 = NB2 * ID; i0 < iLimit; i0 += NB2 * LogicalCores)
for (int k0 = 0; k0 < kLimit; k0 += NB2)
for (int j0 = 0; j0 < jLimit; j0 += NB2)
for (int i = i0; i < min(i0 + NB2, iLimit); i++)
for (int k = k0; k < min(k0 + NB2, kLimit); k++)
{
double a = A.get(i, k);
for (int j = j0; j < min(j0 + NB2, jLimit); j++)
C.increment(i, j, a * b.get(k, j));
}
cdl.countDown();//signal that this worker's share of the blocks is done
}
});
}
try
{
cdl.await();//wait for all workers to finish before returning
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
return;
}
//Else, normal
for (int threadID = 0; threadID < LogicalCores; threadID++)
{
final int ID = threadID;
threadPool.submit(new Runnable()
{
public void run()
{
for (int i = 0 + ID; i < C.rows(); i += LogicalCores)
for (int k = 0; k < A.cols(); k++)
{
double a = A.get(i, k);
for (int j = 0; j < C.cols(); j++)
C.increment(i, j, a * b.get(k, j));
}
cdl.countDown();
}
});
}
try
{
cdl.await();
}
catch (InterruptedException ex)
{
//failure? Fall back and try the serial version
this.multiply(b, C);
}
}
@Override
public void mutableMultiply(double c)
{
for(int i = 0; i < rows(); i++)
for(int j = 0; j < cols(); j++)
set(i, j, get(i, j)*c);
}
@Override
public void mutableMultiply(final double c, ExecutorService threadPool)
{
final CountDownLatch latch = new CountDownLatch(LogicalCores);
for(int threadID = 0; threadID < LogicalCores; threadID++)
{
final int ID = threadID;
threadPool.submit(new Runnable() {
public void run()
{
for(int i = ID; i < rows(); i+=LogicalCores)
for(int j = 0; j < cols(); j++)
set(i, j, get(i, j)*c);
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public void transposeMultiply(double c, Vec b, Vec x)
{
if(this.rows() != b.length())
throw new ArithmeticException("Matrix dimensions do not agree, [" + cols() +"," + rows() + "] x [" + b.length() + ",1]" );
else if(this.cols() != x.length())
throw new ArithmeticException("Matrix dimensions do not agree with target vector");
for(int i = 0; i < rows(); i++)//if b was sparse, we want to skip every time b_i = 0
{
double b_i = b.get(i);
if(b_i == 0)//Skip, not quite as good as sparse handling
continue;//TODO handle sparse input vector better
for(int j = 0; j < cols(); j++)
x.increment(j, c*b_i*this.get(i, j));
}
}
@Override
public void transposeMultiply(final Matrix b, Matrix C)
{
transposeMultiply(b, C, new FakeExecutor());
}
@Override
public void transposeMultiply(final Matrix b, final Matrix C, ExecutorService threadPool)
{
if(this.rows() != b.rows())//Normally it is A_cols == B_rows, but we are doing A'*B, not A*B
throw new ArithmeticException("Matrix dimensions do not agree");
else if(this.cols() != C.rows() || b.cols() != C.cols())
throw new ArithmeticException("Destination matrix does not have matching dimensions");
final Matrix A = this;
///Should choose step size such that 2*NB2^2 * dataTypeSize <= CacheSize
final int iLimit = C.rows();
final int jLimit = C.cols();
final int kLimit = this.rows();
final int blockStep = Math.min(NB2, Math.max(iLimit/LogicalCores, 1));//reduce block size so we can use all cores if needed.
final CountDownLatch cdl = new CountDownLatch(LogicalCores);
for(int threadNum = 0; threadNum < LogicalCores; threadNum++)
{
final int threadID = threadNum;
threadPool.submit(new Runnable() {
public void run()
{
for (int i0 = blockStep * threadID; i0 < iLimit; i0 += blockStep * LogicalCores)
for (int k0 = 0; k0 < kLimit; k0 += blockStep)
for (int j0 = 0; j0 < jLimit; j0 += blockStep)
for (int k = k0; k < min(k0 + blockStep, kLimit); k++)
for (int i = i0; i < min(i0 + blockStep, iLimit); i++)
{
double a = A.get(k, i);
for (int j = j0; j < min(j0 + blockStep, jLimit); j++)
C.increment(i, j, a * b.get(k, j));
}
cdl.countDown();
}
});
}
try
{
cdl.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public void mutableTranspose()
{
if (!this.isSquare())
throw new ArithmeticException("Can only mutable transpose square matrices");
for (int i = 0; i < rows() - 1; i++)
for (int j = i + 1; j < cols(); j++)
{
double tmp = get(j, i);
set(j, i, get(i, j));
set(i, j, tmp);
}
}
@Override
public void transpose(Matrix C)
{
if(this.rows() != C.cols() || this.cols() != C.rows())
throw new ArithmeticException("Target matrix does not have the correct dimensions");
for (int i0 = 0; i0 < rows(); i0 += NB2)
for (int j0 = 0; j0 < cols(); j0 += NB2)
for (int i = i0; i < min(i0+NB2, rows()); i++)
for (int j = j0; j < min(j0+NB2, cols()); j++)
C.set(j, i, this.get(i, j));
}
@Override
public void swapRows(int r1, int r2)
{
if(r1 >= rows() || r2 >= rows())
throw new ArithmeticException("Can not swap row, matrix is smaller then requested");
else if(r1 < 0 || r2 < 0)
throw new ArithmeticException("Can not swap row, there are no negative row indices");
for(int j = 0; j < cols(); j++)
{
double tmp = get(r1, j);
set(r1, j, get(r2, j));
set(r2, j, tmp);
}
}
@Override
public void zeroOut()
{
for(int i = 0; i < rows(); i++)
for(int j = 0; j < cols(); j++)
set(i, j, 0);
}
@Override
public Matrix[] lup()
{
Matrix[] lup = new Matrix[3];
Matrix P = eye(rows());
Matrix L;
Matrix U = this;
//Initialization is a little weird b/c we want to handle rectangular cases as well!
if(rows() > cols())//In this case, we will be changing U before returning it (have to make it smaller, but we can still avoid allocating extra space)
L = getMatrixOfSameType(rows(), cols());
else
L = getMatrixOfSameType(rows(), rows());
for(int i = 0; i < U.rows(); i++)
{
//If rectangular, we still need to loop through to update the rest of L - even though we won't make many other changes
if(i < U.cols())
{
//Partial pivoting, find the largest value in this column and move it to the top!
//Find the largest magnitude value in the column i, and the row j it occurs in
int largestRow = i;
double largestVal = Math.abs(U.get(i, i));
for (int j = i + 1; j < U.rows(); j++)
{
double rowJLeadVal = Math.abs(U.get(j, i));
if (rowJLeadVal > largestVal)
{
largestRow = j;
largestVal = rowJLeadVal;
}
}
//SWAP!
U.swapRows(largestRow, i);
P.swapRows(largestRow, i);
L.swapRows(largestRow, i);
L.set(i, i, 1);
}
//Setting up L
for(int k = 0; k < Math.min(i, U.cols()); k++)
{
double tmp = U.get(i, k)/U.get(k, k);
L.set(i, k, (Double.isNaN(tmp) ? 0.0 : tmp) );
U.set(i, k, 0.0);
for(int j = k+1; j < U.cols(); j++)
{
U.increment(i, j, -L.get(i, k)*U.get(k, j));
}
}
}
if(rows() > cols())//Clean up!
{
//We need to change U to a square nxn matrix in this case, we can safely drop the extra bottom rows!
Matrix newU = getMatrixOfSameType(cols(), cols());
for(int i = 0; i < cols(); i++)
for(int j = 0; j < cols(); j++)
newU.set(i, j, U.get(i, j));
U = newU;
}
lup[0] = L;
lup[1] = U;
lup[2] = P;
return lup;
}
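/* A usage sketch (illustrative, not part of the original source, assuming
* some Matrix A): the returned factors satisfy (up to rounding) P*A = L*U,
* so a system A x = b can be solved with a forward substitution against L
* followed by a back substitution against U.
*
*   Matrix[] lup = A.clone().lup(); // clone, since lup() mutates its target
*   Matrix L = lup[0], U = lup[1], P = lup[2];
*   // solve L y = P*b, then U x = y
*/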
@Override
public Matrix[] lup(ExecutorService threadPool)
{
Matrix[] lup = new Matrix[3];
final Matrix P = eye(rows());
final Matrix L;
Matrix U = this;
final Matrix UU = U;
//Initialization is a little weird b/c we want to handle rectangular cases as well!
if(rows() > cols())//In this case, we will be changing U before returning it (have to make it smaller, but we can still avoid allocating extra space)
L = new DenseMatrix(rows(), cols());
else
L = new DenseMatrix(rows(), rows());
try
{
List<Future<Integer>> bigIndecies = new ArrayList<Future<Integer>>(LogicalCores);
for (int k = 0; k < Math.min(rows(), cols()); k++)
{
//Partial pivoting, find the largest value in this column and move it to the top!
//Find the largest magnitude value in the column k, and the row j it occurs in
int largestRow = k;
double largestVal = Math.abs(U.get(k, k));
if (bigIndecies.isEmpty())
for (int j = k + 1; j < U.rows(); j++)
{
double rowJLeadVal = Math.abs(U.get(j, k));
if (rowJLeadVal > largestVal)
{
largestRow = j;
largestVal = rowJLeadVal;
}
}
else
{
for (Future<Integer> fut : bigIndecies)
{
int j = fut.get();
if(j < 0)//Can happen if they are all zeros
continue;
double rowJLeadVal = Math.abs(U.get(j, k));
if (rowJLeadVal > largestVal)
{
largestRow = j;
largestVal = rowJLeadVal;
}
}
bigIndecies.clear();
}
//SWAP!
U.swapRows(largestRow, k);
P.swapRows(largestRow, k);
L.swapRows(largestRow, k);
L.set(k, k, 1.0);
//Setting up L
final int kk = k;
for (int threadNumber = 0; threadNumber < LogicalCores; threadNumber++)
{
final int threadID = threadNumber;
bigIndecies.add(threadPool.submit(new Callable<Integer>() {
public Integer call() throws Exception
{
double largestSeen = 0.0;
int largestIndex = -1;
for(int i = kk+1+threadID; i < UU.rows(); i+=LogicalCores)
{
double tmp = UU.get(i, kk)/UU.get(kk, kk);
L.set(i, kk, (Double.isNaN(tmp) ? 0.0 : tmp) );
//We perform the first iteration of the loop outside, as we want to cache its value for searching later
UU.increment(i, kk+1, -L.get(i, kk)*UU.get(kk, kk+1));
if(Math.abs(UU.get(i,kk+1)) > largestSeen)
{
largestSeen = Math.abs(UU.get(i,kk+1));
largestIndex = i;
}
for(int j = kk+2; j < UU.cols(); j++)
{
UU.increment(i, j, -L.get(i, kk)*UU.get(kk, j));
}
}
return largestIndex;
}
}));
}
}
//Zero out the bottom rows
for (int k = 0; k < Math.min(rows(), cols()); k++)
for (int j = 0; j < k; j++)
U.set(k, j, 0);
if(rows() > cols())//Clean up!
{
//We need to change U to a square nxn matrix in this case, we can safely drop the extra bottom rows!
Matrix newU = getMatrixOfSameType(cols(), cols());
for(int i = 0; i < cols(); i++)
for(int j = 0; j < cols(); j++)
newU.set(i, j, U.get(i, j));
U = newU;
}
lup[0] = L;
lup[1] = U;
lup[2] = P;
return lup;
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
catch (ExecutionException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
throw new RuntimeException("Uncrecoverable Error");
}
@Override
public Matrix[] qr()
{
int N = cols(), M = rows();
Matrix[] qr = new Matrix[2];
Matrix Q = Matrix.eye(M);
Matrix A;
if(isSquare())
{
mutableTranspose();
A = this;
}
else
A = this.transpose();
int to = cols() > rows() ? M : N;
double[] vk = new double[M];
for(int k = 0; k < to; k++)
{
double vkNorm = initalVKNormCompute(k, M, vk, A);
double beta = vkNorm;
double vk_k = vk[k] = A.get(k, k);//force into register, help the JIT!
vkNorm += vk_k*vk_k;
vkNorm = sqrt(vkNorm);
double alpha = -signum(vk_k) * vkNorm;
vk_k -= alpha;
vk[k] = vk_k;
beta += vk_k*vk_k;
if (beta == 0)
continue;
double TwoOverBeta = 2.0 / beta;
qrUpdateQ(Q, k, vk, TwoOverBeta);
qrUpdateR(k, N, A, vk, TwoOverBeta, M);
}
qr[0] = Q;
if(isSquare())
{
A.mutableTranspose();
qr[1] = A;
}
else
qr[1] = A.transpose();
return qr;
}
private void qrUpdateR(int k, int N, Matrix A, double[] vk, double TwoOverBeta, int M)
{
//First run of loop removed, as it will be setting zeros. More accurate to just set them ourselves
if(k < N)
{
qrUpdateRInitalLoop(k, A, vk, TwoOverBeta, M);
}
//The rest of the normal loop
for(int j = k+1; j < N; j++)
{
double y = 0;//y = vk dot A_j
for(int i = k; i < A.cols(); i++)
y += vk[i]*A.get(j, i);
y *= TwoOverBeta;
for(int i = k; i < M; i++)
A.increment(j, i, -y*vk[i]);
}
}
private void qrUpdateRInitalLoop(int k, Matrix A, double[] vk, double TwoOverBeta, int M)
{
double y = 0;//y = vk dot A_j
for(int i = k; i < A.cols(); i++)
y += vk[i]*A.get(k, i);
y *= TwoOverBeta;
A.increment(k, k, -y*vk[k]);
for(int i = k+1; i < M; i++)
A.set(k, i, 0.0);
}
private void qrUpdateQ(Matrix Q, int k, double[] vk, double TwoOverBeta)
{
//We are computing Q' in what we are treating as the column major order, which represents Q in row major order, which is what we want!
for(int j = 0; j < Q.cols(); j++)
{
double y = 0;//y = vk dot A_j
for (int i = k; i < Q.cols(); i++)
y += vk[i] * Q.get(j, i);
y *= TwoOverBeta;
for (int i = k; i < Q.rows(); i++)
Q.increment(j, i, -y*vk[i]);
}
}
private double initalVKNormCompute(int k, int M, double[] vk, Matrix A)
{
double vkNorm = 0.0;
for(int i = k+1; i < M; i++)
{
vk[i] = A.get(k, i);
vkNorm += vk[i]*vk[i];
}
return vkNorm;
}
@Override
public Matrix[] qr(ExecutorService threadPool)
{
final int N = cols(), M = rows();
Matrix[] qr = new Matrix[2];
final Matrix Q = Matrix.eye(M);
final Matrix A;
if(isSquare())
{
mutableTranspose();
A = this;
}
else
A = this.transpose();
final double[] vk = new double[M];
int to = cols() > rows() ? M : N;
for(int k = 0; k < to; k++)
{
double vkNorm = initalVKNormCompute(k, M, vk, A);
double beta = vkNorm;
double vk_k = vk[k] = A.get(k, k);
vkNorm += vk_k*vk_k;
vkNorm = sqrt(vkNorm);
double alpha = -signum(vk_k) * vkNorm;
vk_k -= alpha;
beta += vk_k*vk_k;
vk[k] = vk_k;
if(beta == 0)
continue;
final double TwoOverBeta = 2.0/beta;
final CountDownLatch latch = new CountDownLatch(LogicalCores);
for (int ID = 0; ID < LogicalCores; ID++)
{
final int threadID = ID;
final int kk = k;
threadPool.submit(new Runnable()
{
public void run()
{
parallelQRUpdateQ();
parallelQRUpdateR();
latch.countDown();
}
private void parallelQRUpdateR()
{
//First run of loop removed, as it will be setting zeros. More accurate to just set them ourselves
if (kk < N && threadID == 0)
{
parallelQRUpdateRFirstIteration();
}
//The rest of the normal loop
for (int j = kk + 1 + threadID; j < N; j += LogicalCores)
{
double y = 0;//y = vk dot A_j
for (int i = kk; i < A.cols(); i++)
y += vk[i] * A.get(j, i);
y *= TwoOverBeta;
for (int i = kk; i < M; i++)
A.increment(j, i, -y * vk[i]);
}
}
private void parallelQRUpdateRFirstIteration()
{
double y = 0;//y = vk dot A_j
for (int i = kk; i < A.cols(); i++)
y += vk[i] * A.get(kk, i);
y *= TwoOverBeta;
A.increment(kk, kk, -y * vk[kk]);
for (int i = kk + 1; i < M; i++)
A.set(kk, i, 0.0);
}
private void parallelQRUpdateQ()
{
//We are computing Q' in what we are treating as the column major order, which represents Q in row major order, which is what we want!
for (int j = 0 + threadID; j < Q.cols(); j += LogicalCores)
{
double y = 0;//y = vk dot A_j
for (int i = kk; i < Q.cols(); i++)
y += vk[i] * Q.get(j, i);
y *= TwoOverBeta;
for (int i = kk; i < Q.rows(); i++)
Q.increment(j, i, -y * vk[i]);
}
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(DenseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
qr[0] = Q;
if(isSquare())
{
A.mutableTranspose();
qr[1] = A;
}
else
qr[1] = A.transpose();
return qr;
}
@Override
public Matrix clone()
{
Matrix clone = getMatrixOfSameType(rows(), cols());
clone.mutableAdd(this);
return clone;
}
}
// JSAT-master/JSAT/src/jsat/linear/HessenbergForm.java
package jsat.linear;
import java.io.Serializable;
import java.util.concurrent.ExecutorService;
import static jsat.linear.Matrix.*;
/**
*
* @author Edward Raff
*/
public class HessenbergForm implements Serializable
{
private static final long serialVersionUID = 1411467026933172901L;
public static void hess(Matrix A)
{
hess(A, null);
}
/**
* Alters the matrix A such that it is in upper Hessenberg form.
* @param A the matrix to transform into upper Hessenberg form
* @param threadpool the source of threads for parallel computation, or {@code null} to run single threaded
*/
public static void hess(Matrix A, ExecutorService threadpool)
{
if(!A.isSquare())
throw new ArithmeticException("Only square matrices can be converted to Upper Hessenberg form");
int m = A.rows();
/**
* Space used to store the vector for updating the columns of A
*/
DenseVector columnUpdateTmp = new DenseVector(m);
double[] vk = new double[m];
/**
* Space used for updating the sub matrix at step i
*/
double[] subMatrixUpdateTmp = new double[m];
double tmp;//Used for temp values
for(int i = 0; i < m-2; i++)
{
//Holds the norm, sqrt{a_i^2 + ... + a_m^2}
double s = 0.0;
//First step of the loop done outside to do extra bit
double sigh = A.get(i+1, i);//Holds the multiplication factor
vk[i+1] = sigh;
s += sigh*sigh;
sigh = sigh > 0 ? 1 : -1;//Sign doesn't change the squaring, so we extract it first
for(int j = i+2; j < m; j++)
{
tmp = A.get(j, i);
vk[j] = tmp;
s += tmp*tmp;
}
double s1 = -sigh*Math.sqrt(s);
            //Now reuse s to quickly get the norm of vk, since it will be almost the same vector
s -= vk[i+1]*vk[i+1];
vk[i+1] -= s1;
s += vk[i+1]*vk[i+1];
            double s1Inv = 1.0/Math.sqrt(s);//Reuse to store the norm of vk. Do the inverse to multiply quickly instead of divide
for(int j = i+1; j < m; j++)
vk[j] *= s1Inv;
//Update sub sub matrix A[i+1:m, i:m]
            //NOTE: The first column that will be altered can be done ourselves, since we know the value set (s1) and that all below it will be zero
Matrix subA = new SubMatrix(A, i+1, i, m, m);
DenseVector vVec = new DenseVector(vk, i+1, m);
Vec tmpV = new DenseVector(subMatrixUpdateTmp, i, m);
tmpV.zeroOut();
vVec.multiply(subA, tmpV);
if(threadpool == null)
OuterProductUpdate(subA, vVec, tmpV, -2.0);
else
OuterProductUpdate(subA, vVec, tmpV, -2.0, threadpool);
//Zero out ourselves after.
            //TODO implement so we don't compute the first row
A.set(i+1, i, s1);
for(int j = i+2; j < m; j++)
A.set(j, i, 0.0);
//Update the columns of A[0:m, i+1:m]
subA = new SubMatrix(A, 0, i+1, m, m);
columnUpdateTmp.zeroOut();
subA.multiply(vVec, 1.0, columnUpdateTmp);
if(threadpool == null)
OuterProductUpdate(subA, columnUpdateTmp, vVec, -2.0);
else
OuterProductUpdate(subA, columnUpdateTmp, vVec, -2.0, threadpool);
}
}
}
| 3,508 | 34.806122 | 148 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/IndexValue.java |
package jsat.linear;
/**
 * The value at a specified index for one dimension. This is a tool meant for use with sparse data structures.
 * The values should not be backed by any list, and changes to the IndexValue should not alter any data
 * structures. This class is meant to be returned by an iterator, and the iterator may reuse the same
 * IndexValue object for efficiency.
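 * <br><br>
 * For example, iterating the non-zero values of a {@code Vec v} (a sketch;
 * the same IndexValue object may be reused between iterations):
 * <pre>{@code
 * for(IndexValue iv : v)
 *     System.out.println(iv.getIndex() + " -> " + iv.getValue());
 * }</pre>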
*
* @author Edward Raff
*/
public class IndexValue
{
private int index;
private double value;
/**
* Creates a new IndexValue
* @param index the index for the given value
* @param value the value at the specified index
*/
public IndexValue(int index, double value)
{
this.index = index;
this.value = value;
}
/**
* Sets the index associated with the value.
* @param index the new index
*/
public void setIndex(int index)
{
this.index = index;
}
/**
* Sets the value associated with the index
* @param value the new value
*/
public void setValue(double value)
{
this.value = value;
}
/**
* Returns the index of the stored value
* @return the index of the stored value
*/
public int getIndex()
{
return index;
}
/**
* Returns the value of the stored index
* @return the value of the stored index
*/
public double getValue()
{
return value;
}
}
| 1,453 | 21.369231 | 110 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/LUPDecomposition.java |
package jsat.linear;
import java.io.Serializable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.utils.SystemInfo;
/**
* This class uses the LUP decomposition of a matrix to provide efficient methods for solving A x = b, as well as computing the determinant of A.
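 * <br><br>
 * A minimal usage sketch (assuming a square, non-singular {@code Matrix A}
 * and a {@code Vec b} of matching length):
 * <pre>{@code
 * LUPDecomposition lup = new LUPDecomposition(A);
 * Vec x = lup.solve(b);    // x such that A x = b
 * double detA = lup.det(); // determinant of A
 * }</pre>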
* @author Edward Raff
*/
public class LUPDecomposition implements Cloneable, Serializable
{
private static final long serialVersionUID = -149659693838168048L;
private static final int threads = SystemInfo.LogicalCores;
private final Matrix L, U, P;
public LUPDecomposition(Matrix L, Matrix U, Matrix P)
{
this.L = L;
this.U = U;
this.P = P;
}
public LUPDecomposition(Matrix A)
{
Matrix[] lup = A.clone().lup();
L = lup[0];
U = lup[1];
P = lup[2];
}
public LUPDecomposition(Matrix A, ExecutorService threadpool)
{
Matrix[] lup = A.clone().lup(threadpool);
L = lup[0];
U = lup[1];
P = lup[2];
}
/**
*
* @return true if the original matrix A, from which this factorization is from, is a square matrix
*/
public boolean isSquare()
{
return L.isSquare() && U.isSquare();
}
/**
*
* @return the determinant of the original Matrix A, |A|
*/
public double det()
{
if(!isSquare())
throw new ArithmeticException("Rectangual matricies do not have a determinat");
double det = 1;
for(int i = 0; i < Math.min(U.rows(), U.cols()); i++)
det *= U.get(i, i);
//We need to swap back P to get the sign, so we make a clone. This could be cached if we need to
int rowSwaps = 0;
Matrix pCopy = P.clone();
//The number of row swaps in P is the sign change
for(int i = 0; i < pCopy.cols(); i++)
if(pCopy.get(i, i) != 1)
{
rowSwaps++;
//find the row that has our '1'!
int j = i+1;
while(pCopy.get(j, i) == 0)
j++;
                pCopy.swapRows(i, j);//Don't really care who we swap with, it will work out in the end
}
return rowSwaps % 2 !=0 ? -det : det;
}
public Vec solve(Vec b)
{
//Solve P A x = L U x = P b, for x
//First solve L y = P b
Vec y = forwardSub(L, P.multiply(b));
        //Solve U x = y
Vec x = backSub(U, y);
return x;
}
public Matrix solve(Matrix B)
{
//Solve P A x = L U x = P b, for x
//First solve L y = P b
Matrix y = forwardSub(L, P.multiply(B));
        //Solve U x = y
Matrix x = backSub(U, y);
return x;
}
public Matrix solve(Matrix B, ExecutorService threadpool)
{
//Solve P A x = L U x = P b, for x
//First solve L y = P b
Matrix y = forwardSub(L, P.multiply(B), threadpool);
        //Solve U x = y
Matrix x = backSub(U, y, threadpool);
return x;
}
@Override
public LUPDecomposition clone()
{
return new LUPDecomposition(L.clone(), U.clone(), P.clone());
}
/**
* Solves for the vector x such that L x = b
*
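     * <br><br>
     * For example, the two substitution steps can be combined to solve
     * <i>A x = b</i> by hand (a sketch, assuming {@code L}, {@code U}, and
     * {@code P} come from an LUP factorization of <i>A</i>):
     * <pre>{@code
     * Vec y = LUPDecomposition.forwardSub(L, P.multiply(b));
     * Vec x = LUPDecomposition.backSub(U, y);
     * }</pre>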
* @param L a lower triangular matrix
     * @param b a vector whose length is equal to the rows in L
* @return x such that L x = b
*/
public static Vec forwardSub(Matrix L, Vec b)
{
if(b.length() != L.rows())
throw new ArithmeticException("Vector and matrix sizes do not agree");
Vec y = b instanceof SparseVector ? new SparseVector(b.length()) : new DenseVector(b.length());
for(int i = 0; i < b.length(); i++)
{
double y_i = b.get(i);
for(int j = 0; j < i; j++)
y_i -= L.get(i, j)*y.get(j);
y_i /= L.get(i, i);
y.set(i, y_i);
}
return y;
}
/**
* Solves for the matrix x such that L x = b
*
* @param L a lower triangular matrix
* @param b a matrix with the same number of rows as L
* @return x such that L x = b
*/
public static Matrix forwardSub(Matrix L, Matrix b)
{
if (b.rows() != L.rows())
throw new ArithmeticException("Vector and matrix sizes do not agree");
Matrix y = new DenseMatrix(b.rows(), b.cols());
        //Store the column separately so that we can access this array in row major order, instead of the matrix in column major (yay cache!)
double[] y_col_k = new double[b.rows()];
for (int k = 0; k < b.cols(); k++)
{
            for (int i = 0; i < b.rows(); i++)//We operate the same as forwardSub(Matrix, Vec), but we apply each column of B as its own Vec.
{
y_col_k[i] = b.get(i, k);
for (int j = 0; j < i; j++)
y_col_k[i] -= L.get(i, j) * y_col_k[j];
y_col_k[i] /= L.get(i, i);
}
for(int z = 0; z < y_col_k.length; z++)
y.set(z, k, y_col_k[z]);
}
return y;
}
/**
* Solves for the matrix x such that L x = b
*
* @param L a lower triangular matrix
* @param b a matrix with the same number of rows as L
* @param threadpool source of threads for the parallel computation
* @return x such that L x = b
*/
public static Matrix forwardSub(final Matrix L, final Matrix b, ExecutorService threadpool)
{
if (b.rows() != L.rows())
throw new ArithmeticException("Vector and matrix sizes do not agree");
final CountDownLatch latch = new CountDownLatch(threads);
final Matrix y = new DenseMatrix(b.rows(), b.cols());
for(int threadNum = 0; threadNum < threads; threadNum++)
{
final int threadID = threadNum;
threadpool.submit(new Runnable() {
public void run()
{
                    //Store the column separately so that we can access this array in row major order, instead of the matrix in column major (yay cache!)
double[] y_col_k = new double[b.rows()];
for (int k = threadID; k < b.cols(); k+=threads)
{
                        for (int i = 0; i < b.rows(); i++)//We operate the same as forwardSub(Matrix, Vec), but we apply each column of B as its own Vec. We swap the order for better cache use
{
y_col_k[i] = b.get(i, k);
for (int j = 0; j < i; j++)
y_col_k[i] -= L.get(i, j) * y_col_k[j];
y_col_k[i] /= L.get(i, i);
//y.set(i, k, y_i);
}
for(int z = 0; z < y_col_k.length; z++)
y.set(z, k, y_col_k[z]);
}
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(LUPDecomposition.class.getName()).log(Level.SEVERE, null, ex);
return forwardSub(L, b);
}
return y;
}
/**
* Solves for the vector x such that U x = y
*
* @param U an upper triangular matrix
     * @param y a vector whose length is equal to the rows in U
* @return x such that U x = y
*/
public static Vec backSub(Matrix U, Vec y)
{
if (y.length() != U.rows())
throw new ArithmeticException("Vector and matrix sizes do not agree");
Vec x = y instanceof SparseVector ? new SparseVector(U.cols()) : new DenseVector(U.cols());
final int start = Math.min(U.rows(), U.cols())-1;
for (int i = start; i >= 0; i--)
{
double x_i = y.get(i);
for (int j = i + 1; j <= start; j++)
x_i -= U.get(i, j) * x.get(j);
x_i /= U.get(i, i);
if(Double.isInfinite(x_i))//Occurs when U_(i,i) = 0
x_i = 0;
x.set(i, x_i);
}
return x;
}
/**
* Solves for the matrix x such that U x = y
*
* @param U an upper triangular matrix
* @param y a matrix with the same number of rows as U
* @return x such that U x = y
*/
public static Matrix backSub(Matrix U, Matrix y)
{
if (y.rows() != U.rows())
throw new ArithmeticException("Vector and matrix sizes do not agree");
Matrix x = new DenseMatrix(U.cols(), y.cols());
double[] x_col_k = new double[y.rows()];
final int start = Math.min(U.rows(), U.cols())-1;
for (int k = 0; k < y.cols(); k++)
{
            for (int i = start; i >= 0; i--)//We operate the same as backSub(Matrix, Vec), but we apply each column of B as its own Vec.
{
x_col_k[i] = y.get(i, k);
for (int j = i + 1; j <= start; j++)
x_col_k[i] -= U.get(i, j) * x_col_k[j];
x_col_k[i] /= U.get(i, i);
}
for(int i = 0; i < x_col_k.length; i++)
if(Double.isInfinite(x_col_k[i]))//Occurs when U_(i,i) = 0
x.set(i, k, 0);
else
x.set(i, k, x_col_k[i]);
}
return x;
}
/**
* Solves for the matrix x such that U x = y
*
* @param U an upper triangular matrix
* @param y a matrix with the same number of rows as U
* @param threadpool source of threads for the parallel computation
* @return x such that U x = y
*/
public static Matrix backSub(final Matrix U, final Matrix y, ExecutorService threadpool)
{
if (y.rows() != U.rows())
throw new ArithmeticException("Vector and matrix sizes do not agree");
final Matrix x = new DenseMatrix(U.cols(), y.cols());
final CountDownLatch latch = new CountDownLatch(threads);
final int start = Math.min(U.rows(), U.cols())-1;
for (int threadNum = 0; threadNum < threads; threadNum++)
{
final int threadID = threadNum;
threadpool.submit(new Runnable()
{
public void run()
{
double[] x_col_k = new double[y.rows()];
for (int k = threadID; k < y.cols(); k += threads)
{
                    for (int i = start; i >= 0; i--)//We operate the same as backSub(Matrix, Vec), but we apply each column of B as its own Vec.
{
x_col_k[i] = y.get(i, k);
for (int j = i + 1; j <= start; j++)
x_col_k[i] -= U.get(i, j) * x_col_k[j];
x_col_k[i] /= U.get(i, i);
}
for (int i = 0; i < x_col_k.length; i++)
if(Double.isInfinite(x_col_k[i]))//Occurs when U_(i,i) = 0
x.set(i, k, 0);
else
x.set(i, k, x_col_k[i]);
}
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(LUPDecomposition.class.getName()).log(Level.SEVERE, null, ex);
return backSub(U, y);
}
return x;
}
}
| 12,001 | 30.920213 | 192 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/Lanczos.java | /*
* This implementation has been contributed under the Public Domain.
*/
package jsat.linear;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Random;
import jsat.utils.random.RandomUtil;
/**
* Computes the top <i>k</i> Eigen Values and Eigen Vectors of a symmetric
* matrix <b>A<sup>n,n</sup></b>. <b>A</b> may be Sparse or Dense, and the results will be
* computed faster than using the more general purpose
* {@link EigenValueDecomposition}.<br>
* <br>
 * If a non symmetric matrix <b>X<sup>n,m</sup></b> is given, this can implicitly
* compute the top-<i>k</i> eigen values and vectors for the matrix
* <b>A=X<sup>T</sup> X</b> or <b>A=X X<sup>T</sup></b>, without having to
* explicitly construct the potentially larger matrix A.
*
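 * <br><br>
 * A minimal usage sketch (assuming a symmetric {@code Matrix A} and a desired
 * number of components {@code k}):
 * <pre>{@code
 * Lanczos lan = new Lanczos(A, k, false, true);
 * Vec eigenValues = lan.getEigenValues();    // the top-k eigen values
 * Matrix eigenVecs = lan.getEigenVectors();  // an n x k matrix of eigen vectors
 * }</pre>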
*
* @author Edward Raff <[email protected]>
*/
public class Lanczos implements Serializable
{
/**
* The eigen values of the resulting decomposition
*/
public double[] d;
public Matrix eigenVectors;
public Lanczos(Matrix A, int k, boolean A_AT, boolean is_symmetric)
{
Random rand = RandomUtil.getRandom();
int dims = A_AT ? A.rows() : A.cols();
/**
* The rank that we will perform computations too
*/
int k_work = Math.min(k*2+1, dims);
int extra_ranks = k_work-k;
Vec v_prev = new ConstantVector(0.0, dims);
//1. Let v_{1} be an arbitrary vector with Euclidean norm 1.
Vec v_next = new DenseVector(dims);//init to 1/sqrt(n)
v_next.add(1.0/Math.sqrt(dims));
double[] alpha = new double[k_work];
double[] beta = new double[k_work];
DenseMatrix V = new DenseMatrix(k_work, dims);
/**
* Working variable
*/
DenseVector w_j = new DenseVector(dims);
DenseVector tmp = new DenseVector(A_AT ? A.cols() : A.rows());
for(int j = 0; j < k_work; j++)
{
w_j.zeroOut();
//3. Let w'_j = A v_j
if(is_symmetric)
//w'_j
A.multiply(v_next, 1.0, w_j);
        else//not symmetric, so we implicitly work with A * A' or A' * A
{
tmp.zeroOut();
if(A_AT)//We are doing A * A' * v_i
{
A.transposeMultiply(1.0, v_next, tmp);
A.multiply(tmp, 1.0, w_j);
}
else//We are doing A' * A * v_i
{
A.multiply(v_next, 1.0, tmp);
A.transposeMultiply(1.0, tmp, w_j);
}
}
//4. Let α_j =w'_j^T v_j.
alpha[j] = w_j.dot(v_next);
            //5. Let w_j = w'_j - α_j v_j - β_j v_{j-1}
w_j.mutableAdd(-alpha[j], v_next);
w_j.mutableAdd(-beta[j], v_prev);
            //TODO, do not do full-orthogonalization! That's too much
orthogonalize(j, V, w_j);
//Save off the row of V we just computed
v_prev = V.getRowView(j);
v_next.copyTo(v_prev);
//For simplicity, we do the first "two" steps at the end
if(j+1 < k_work)
{
//1. β_{j+1}=||w_j||
beta[j+1] = w_j.pNorm(2);
                //2a. If β_{j+1} == 0, pick as v_{j+1} an arbitrary vector with Euclidean norm 1 that is orthogonal to all of v_{1},...,v_{j-1}
if(Math.abs(beta[j+1]) < 1e-15)
{
//We need to pick a new value for w_j, which will become v_{j+1}
//fill will random values
w_j.applyFunction(x->rand.nextDouble()*2-1);
orthogonalize(j+1, V, w_j);
w_j.mutableDivide(w_j.pNorm(2)+1e-15);
beta[j+1] = 1;
}
                //2b. v_{j+1} = w_j/β_{j+1}
w_j.copyTo(v_next);
v_next.mutableDivide(beta[j+1]);
}
}
        //Inefficient computation of the eigen values & vectors of the tridiagonal matrix.
        //TODO is to replace this with a smarter implementation
DenseMatrix triDaig = new DenseMatrix(k_work, k_work);
for(int i = 0; i < k_work; i++)
{
triDaig.set(i, i, alpha[i]);
if(i+1 < k_work)
{
triDaig.set(i, i+1, beta[i+1]);
triDaig.set(i+1, i, beta[i+1]);
}
}
EigenValueDecomposition evd = new EigenValueDecomposition(triDaig);
        //Sort by largest magnitude eigen values first
evd.sortByEigenValue((a,b) -> -Double.compare(Math.abs(a), Math.abs(b)));
d = Arrays.copyOf(evd.getRealEigenvalues(), k);
// d = evd.getRealEigenvalues();
eigenVectors = V.transposeMultiply(evd.getV());
eigenVectors.changeSize(dims, k);
}
/**
     * Returns a Vector of length <i>k</i> with the eigen values of the matrix
* @return a vector of the eigen values
*/
public Vec getEigenValues()
{
return new DenseVector(d);
}
/**
     * Returns an <i>n,k</i> matrix of the eigen vectors computed, where <i>n</i> is
     * the dimension of the original input.
     *
     * @return an <i>n,k</i> matrix of the eigen vectors
*/
public Matrix getEigenVectors()
{
return eigenVectors;
}
/**
* Helper function for orthogonalizing vectors against existing vectors. One
* call to this method performs partial orthogonalization, two sequential
     * calls perform full orthogonalization
*
* @param j the current limit of vectors (rows) in V to orthogonalize
* against
* @param V the matrix of Vectors to be orthogonalized
* @param w_j the vector to make orthogonal compared to the previous j vectors
*/
private void orthogonalize(int j, DenseMatrix V, Vec w_j)
{
        //Orthogonalization step that is needed, but not included in the Wikipedia description
for(int i = 0; i < j; i++)
{
Vec V_i = V.getRowView(i);
double tmp_dot = w_j.dot(V_i);
if(Math.abs(tmp_dot) < 1e-15)//essentially zero, nothing to orthogonalize
continue;
w_j.mutableAdd(-tmp_dot, V_i);
}
}
}
| 6,476 | 32.559585 | 147 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/Matrix.java |
package jsat.linear;
import java.io.Serializable;
import java.util.Iterator;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.utils.ModifiableCountDownLatch;
import static jsat.utils.SystemInfo.*;
/**
* Generic class with some pre-implemented methods for a Matrix object.
* Throughout the documentation, the object that has its method called on will
* be denoted as <i>A</i>. So if you have code that looks like
* <br><br><center>
* {@code Matrix gramHat = gram.subtract(Matrix.eye(gram.rows()));}
* </center><br><br>
* Then {@code gram} would be the matrix <i>A</i> in the documentation.
* <br>
* Matrices will use a capital letter, vectors a <b>bold</b> lower case letter,
* and scalars a normal lower case letter.
*
*
 * @author Edward Raff
*/
public abstract class Matrix implements Cloneable, Serializable
{
private static final long serialVersionUID = 6888360415978051714L;
/**
* Creates a new Matrix that stores the result of {@code A+B}
     * @param B the matrix to add to <i>this</i>
* @return {@code A+B}
*/
public Matrix add(Matrix B)
{
Matrix toReturn = getThisSideMatrix(B);
toReturn.mutableAdd(1.0, B);
return toReturn;
}
/**
* Creates a new Matrix that stores the result of {@code A+B}
     * @param B the matrix to add to <i>this</i>
* @param threadPool the source of threads to do computation in parallel
* @return {@code A+B}
*/
public Matrix add(Matrix B, ExecutorService threadPool)
{
Matrix toReturn = getThisSideMatrix(B);
toReturn.mutableAdd(1.0, B, threadPool);
return toReturn;
}
/**
* Creates a new Matrix that stores the result of {@code A+c}
* @param c the scalar to add to each value in <i>this</i>
* @return {@code A+c}
*/
public Matrix add(double c)
{
Matrix toReturn = getThisSideMatrix(null);
toReturn.mutableAdd(c);
return toReturn;
}
/**
* Creates a new Matrix that stores the result of {@code A+c}
* @param c the scalar to add to each value in <i>this</i>
* @param threadPool the source of threads to do computation in parallel
     * @return {@code A+c}
*/
public Matrix add(double c, ExecutorService threadPool)
{
Matrix toReturn = getThisSideMatrix(null);
toReturn.mutableAdd(c, threadPool);
return toReturn;
}
/**
* Alters the current matrix to store the value <i>A+B</i>
     * @param B the matrix to add to <i>this</i>
*/
public void mutableAdd(Matrix B)
{
this.mutableAdd(1.0, B);
}
/**
* Alters the current matrix to store the value <i>A+c*B</i>
     * @param c the scalar constant to multiply <i>B</i> by
* @param B the matrix to add to <i>this</i>
*/
abstract public void mutableAdd(double c, Matrix B);
/**
* Alters the current matrix to store the value <i>A+B</i>
* @param B the matrix to add to <i>this</i>
* @param threadpool the source of threads to do computation in parallel
*/
public void mutableAdd(Matrix B, ExecutorService threadpool)
{
this.mutableAdd(1.0, B, threadpool);
}
/**
* Alters the current matrix to store the value <i>A+c*B</i>
     * @param c the scalar constant to multiply <i>B</i> by
* @param B the matrix to add to <i>this</i>
* @param threadPool the source of threads to do computation in parallel
*/
abstract public void mutableAdd(double c, Matrix B, ExecutorService threadPool);
/**
* Alters the current matrix to store the value <i>A+c</i>
* @param c the scalar constant to add to <i>this</i>
*/
abstract public void mutableAdd(double c);
/**
* Alters the current matrix to store the value <i>A+c</i>
* @param c the scalar constant to add to <i>this</i>
* @param threadPool the source of threads to do computation in parallel
*/
abstract public void mutableAdd(double c, ExecutorService threadPool);
/**
* Indicates whether or not this matrix can be mutated. If
* {@code false}, any method that contains "mutate" will not work.
* <br><br>
* By default, this returns {@code true}
*
* @return {@code true} if the matrix supports being altered, {@code false}
     * otherwise.
*/
public boolean canBeMutated()
{
return true;
}
/**
* Returns an appropriate matrix to use for some operation A <i>op</i> B,
* where {@code A = this }
* @param B the other matrix, may be null
* @return a matrix that can be mutated to take the place of A
*/
private Matrix getThisSideMatrix(Matrix B)
{
if(this.canBeMutated())
return this.clone();
else//so far, only other option in JSAT is a dense matrix
{
DenseMatrix dm = new DenseMatrix(rows(), cols());
dm.mutableAdd(this);
return dm;
}
}
/**
* Creates a new Matrix that stores the result of <i>A-B</i>
* @param B the matrix to subtract from <i>this</i>.
* @return a new matrix equal to <i>A-B</i>
*/
public Matrix subtract(Matrix B)
{
Matrix toReturn = getThisSideMatrix(B);
toReturn.mutableSubtract(1.0, B);
return toReturn;
}
/**
* Creates a new Matrix that stores the result of <i>A-B</i>
* @param B the matrix to subtract from <i>this</i>.
* @param threadPool the source of threads to do computation in parallel
* @return a new matrix equal to <i>A-B</i>
*/
public Matrix subtract(Matrix B, ExecutorService threadPool)
{
Matrix toReturn = getThisSideMatrix(B);
toReturn.mutableSubtract(1.0, B, threadPool);
return toReturn;
}
/**
* Creates a new Matrix that stores the result of <i>A-c</i>
* @param c the scalar constant to subtract from <i>this</i>
* @return a new matrix equal to <i>A-B</i>
*/
public Matrix subtract(double c)
{
Matrix toReturn = getThisSideMatrix(null);
toReturn.mutableSubtract(c);
return toReturn;
}
/**
* Creates a new Matrix that stores the result of <i>A-c</i>
* @param c the scalar constant to subtract from <i>this</i>
* @param threadPool the source of threads to do computation in parallel
* @return a new matrix equal to <i>A-B</i>
*/
public Matrix subtract(double c, ExecutorService threadPool)
{
Matrix toReturn = getThisSideMatrix(null);
toReturn.mutableSubtract(c, threadPool);
return toReturn;
}
/**
* Alters the current matrix to store <i>A-B</i>
* @param B the matrix to subtract from <i>this</i>.
*/
public void mutableSubtract(Matrix B)
{
this.mutableSubtract(1.0, B);
}
/**
* Alters the current matrix to store <i>A-c*B</i>
* @param c the scalar constant to multiply <i>B</i> by
* @param B the matrix to subtract from <i>this</i>.
*/
public void mutableSubtract(double c, Matrix B)
{
mutableAdd(-c, B);
}
/**
* Alters the current matrix to store <i>A-B</i>
* @param B the matrix to subtract from <i>this</i>.
* @param threadpool the source of threads to do computation in parallel
*/
public void mutableSubtract(Matrix B, ExecutorService threadpool)
{
this.mutableSubtract(1.0, B, threadpool);
}
/**
* Alters the current matrix to store <i>A-c*B</i>
* @param c the scalar constant to multiply <i>B</i> by
* @param B the matrix to subtract from <i>this</i>.
* @param threadPool the source of threads to do computation in parallel
*/
public void mutableSubtract(double c, Matrix B, ExecutorService threadPool)
{
mutableAdd(-c, B, threadPool);
}
/**
* Alters the current matrix to store <i>A-c</i>
* @param c the scalar constant to subtract from <i>this</i>
*/
public void mutableSubtract(double c)
{
mutableAdd(-c);
}
/**
* Alters the current matrix to store <i>A-c</i>
* @param c the scalar constant to subtract from <i>this</i>
* @param threadPool the source of threads to do computation in parallel
*/
public void mutableSubtract(double c, ExecutorService threadPool)
{
mutableAdd(-c, threadPool);
}
/**
* If this matrix is <i>A<sub>m x n</sub></i>, and <i><b>b</b></i> has a length of n, and <i><b>c</b></i> has a length of m,
* then this will mutate c to store <i><b>c</b> = <b>c</b> + A*<b>b</b>*z</i>
     * @param b the vector to be treated as a column vector
* @param z the constant to multiply the <i>A*<b>b</b></i> value by.
* @param c where to place the result by addition
* @throws ArithmeticException if the dimensions of A, <b>b</b>, or <b>c</b> do not all agree
*/
abstract public void multiply(Vec b, double z, Vec c);
/**
* Creates a new vector that is equal to <i>A*<b>b</b> </i>
* @param b the vector to multiply by
* @return a new vector <i>A*<b>b</b> </i>
*/
public Vec multiply(Vec b)
{
DenseVector result = new DenseVector(rows());
multiply(b, 1.0, result);
return result;
}
/**
* Creates a new matrix that stores <i>A*B</i>
* @param B the matrix to multiply by
* @return a new matrix <i>A*B</i>
*/
public Matrix multiply(Matrix B)
{
Matrix C = new DenseMatrix(this.rows(), B.cols());
multiply(B, C);
return C;
}
/**
* Creates a new matrix that stores <i>A*B</i>
* @param B the matrix to multiply by
* @param threadPool the source of threads to do computation in parallel
* @return a new matrix <i>A*B</i>
*/
public Matrix multiply(Matrix B, ExecutorService threadPool)
{
Matrix C = new DenseMatrix(this.rows(), B.cols());
multiply(B, C, threadPool);
return C;
}
/**
* Alters the matrix <i>C</i> to be equal to <i>C = C+A*B</i>
* @param B the matrix to multiply <i>this</i> with
* @param C the matrix to add the result to
*/
abstract public void multiply(Matrix B, Matrix C);
/**
* Alters the matrix <i>C</i> to be equal to <i>C = C+A*B</i>
* @param B the matrix to multiply this with
* @param C the matrix to add the result to
* @param threadPool the source of threads to do computation in parallel
*/
abstract public void multiply(Matrix B, Matrix C, ExecutorService threadPool);
/**
* Alters the matrix <i>C</i> to be equal to <i>C = C+A*B<sup>T</sup></i>
* @param B the matrix to multiply <i>this</i> with
* @param C the matrix to add the result to
*/
abstract public void multiplyTranspose(final Matrix B, final Matrix C);
/**
* Returns the new matrix <i>C</i> that is <i>C = A*B<sup>T</sup></i>
* @param B the matrix to multiply by the transpose of
* @return the result C
*/
public Matrix multiplyTranspose(final Matrix B)
{
Matrix C = new DenseMatrix(this.rows(), B.rows());
multiplyTranspose(B, C);
return C;
}
/**
* Alters the matrix <i>C</i> to be equal to <i>C = C+A*B<sup>T</sup></i>
* @param B the matrix to multiply this with
* @param C the matrix to add the result to
* @param threadPool the source of threads to do computation in parallel
*/
abstract public void multiplyTranspose(final Matrix B, final Matrix C, ExecutorService threadPool);
/**
* Returns the new matrix <i>C</i> that is <i>C = A*B<sup>T</sup></i>
* @param B the matrix to multiply by the transpose of
* @param threadPool the source of threads to do computation in parallel
* @return the result C
*/
public Matrix multiplyTranspose(final Matrix B, ExecutorService threadPool)
{
Matrix C = new DenseMatrix(this.rows(), B.rows());
multiplyTranspose(B, C, threadPool);
return C;
}
/**
* Creates a new Matrix that stores <i>A*c</i>
* @param c the scalar constant to multiply by
     * @return a new matrix <i>A*c</i>
*/
public Matrix multiply(double c)
{
Matrix toReturn = getThisSideMatrix(null);
toReturn.mutableMultiply(c);
return toReturn;
}
/**
* Creates a new Matrix that stores <i>A*c</i>
* @param c the scalar constant to multiply by
* @param threadPool the source of threads to do computation in parallel
* @return a new matrix equal to <i>A*c</i>
*/
public Matrix multiply(double c, ExecutorService threadPool)
{
Matrix toReturn = getThisSideMatrix(null);
toReturn.mutableMultiply(c, threadPool);
return toReturn;
}
/**
* Alters the current matrix to be equal to <i>A*c</i>
* @param c the scalar constant to multiply by
*/
abstract public void mutableMultiply(double c);
/**
* Alters the current matrix to be equal to <i>A*c</i>
* @param c the scalar constant to multiply by
* @param threadPool the source of threads to do computation in parallel
*/
abstract public void mutableMultiply(double c, ExecutorService threadPool);
abstract public Matrix[] lup();
abstract public Matrix[] lup(ExecutorService threadPool);
abstract public Matrix[] qr();
abstract public Matrix[] qr(ExecutorService threadPool);
/**
     * This method alters the size of a matrix, either adding or removing
     * rows / columns from the internal structure of the matrix. Every resize
     * call may cause a new allocation internally, so it should not be called
     * excessively. All added rows / columns will have values of zero.
* If a row / column is removed, it is always the bottom/right most row /
* column removed. Values of the removed rows / columns will be lost.
*
* @param newRows the new number of rows, must be positive
* @param newCols the new number of columns, must be positive.
*/
abstract public void changeSize(int newRows, int newCols);
/**
* Transposes the current matrix in place, altering its value.
* Only valid for square matrices
*/
abstract public void mutableTranspose();
/**
* Returns a new matrix that is the transpose of this matrix.
* @return a new matrix <tt>A</tt>'
*/
public Matrix transpose()
{
Matrix toReturn = new DenseMatrix(cols(), rows());
this.transpose(toReturn);
return toReturn;
}
/**
* Overwrites the values stored in matrix <i>C</i> to store the value of
* <i>A'</i>
* @param C the matrix to store the transpose of the current matrix
* @throws ArithmeticException if the dimensions of <i>C</i> do not match
* the dimensions of <i>this'</i>
*/
abstract public void transpose(Matrix C);
/**
* Creates a new matrix equal to <i>A'*B</i>, or the same result as <br>
* <code>
* A.{@link #transpose() transpose()}.{@link #multiply(jsat.linear.Matrix) multiply(B)}
* </code>
*
* @param B the other Matrix
* @return a new matrix equal to <i>A'*B</i>
*/
public Matrix transposeMultiply(Matrix B)
{
Matrix C = new DenseMatrix(this.cols(), B.cols());
transposeMultiply(B, C);
return C;
}
/**
* Alters the matrix <i>C</i> so that <i>C = C + A'*B</i>
* @param B the matrix to multiply by
* @param C the matrix to add the result to
*/
abstract public void transposeMultiply(Matrix B, Matrix C);
/**
* Computes the result matrix of <i>A'*B</i>, or the same result as <br>
* <code>
* A.{@link #transpose() transpose()}.{@link #multiply(jsat.linear.Matrix) multiply(B)}
* </code>
*
* @param B the matrix to multiply by
* @param threadPool the source of threads to do computation in parallel
* @return a new matrix equal to <i>A'*B</i>
*/
public Matrix transposeMultiply(Matrix B, ExecutorService threadPool)
{
Matrix C = new DenseMatrix(this.cols(), B.cols());
transposeMultiply(B, C, threadPool);
return C;
}
/**
* Alters the matrix <i>C</i> so that <i>C = C + A'*B</i>
* @param B the matrix to multiply by
* @param C the matrix to place the results in
* @param threadPool the source of threads to do computation in parallel
*/
abstract public void transposeMultiply(Matrix B, Matrix C, ExecutorService threadPool);
/**
* Alters the vector <i><b>x</b></i> to be equal to <i><b>x</b> = <b>x</b> + A'*<b>b</b>*c</i>
*
* @param c the scalar constant to multiply by
* @param b the vector to multiply by
* @param x the vector the add the result to
*/
abstract public void transposeMultiply(double c, Vec b, Vec x);
/**
* Creates a new vector equal to <i><b>x</b> = A'*<b>b</b>*c</i>
* @param c the scalar constant to multiply by
* @param b the vector to multiply by
* @return the new vector equal to <i>A'*b*c</i>
*/
public Vec transposeMultiply(double c, Vec b)
{
DenseVector toReturns = new DenseVector(this.cols());
this.transposeMultiply(c, b, toReturns);
return toReturns;
}
/**
     * Returns the value stored at the matrix position <i>A<sub>i,j</sub></i>
* @param i the row, starting from 0
* @param j the column, starting from 0
* @return the value at <i>A<sub>i,j</sub></i>
*/
abstract public double get(int i, int j);
/**
     * Sets the value stored at the matrix position <i>A<sub>i,j</sub></i>
* @param i the row, starting from 0
* @param j the column, starting from 0
* @param value the value to place at <i>A<sub>i,j</sub></i>
*/
abstract public void set(int i, int j, double value);
/**
* Alters the current matrix at index <i>(i,j)</i> to be equal to
* <i>A<sub>i,j</sub> = A<sub>i,j</sub> + value</i>
* @param i the row, starting from 0
* @param j the column, starting from 0
* @param value the value to add to the matrix coordinate
*/
public void increment(int i, int j, double value)
{
if(Double.isNaN(value) || Double.isInfinite(value))
throw new ArithmeticException("Can not add a value " + value);
set(i, j, get(i, j)+value);
}
/**
* Returns the number of rows stored in this matrix
* @return the number of rows stored in this matrix
*/
abstract public int rows();
/**
* Returns the number of columns stored in this matrix
* @return the number of columns stored in this matrix
*/
abstract public int cols();
/**
* Returns {@code true} if the matrix is sparse, {@code false} otherwise
* @return {@code true} if the matrix is sparse, {@code false} otherwise
*/
abstract public boolean isSparce();
/**
* Returns the number of non zero values stored in this matrix. This is
* mostly useful for sparse matrices.
*
* @return the number of non zero values stored in this matrix.
*/
public long nnz()
{
return ((long)rows())*cols();
}
/**
* Returns {@code true} if the matrix is square, meaning it has the same
* number of {@link #rows() rows} and {@link #cols() columns}.
* @return {@code true} if this matrix is square, {@code false} if it is
* rectangular.
*/
public boolean isSquare()
{
return rows() == cols();
}
/**
* Alters the current matrix by swapping the values stored in two different
* rows.
* @param r1 the first row to swap
* @param r2 the second row to swap
*/
abstract public void swapRows(int r1, int r2);
/**
* Creates a vector that has a copy of the values in column <i>j</i> of this
     * matrix. Altering it will not affect the values in <i>this</i> matrix
* @param j the column to copy
* @return a clone of the column as a {@link Vec}
*/
public Vec getColumn(int j)
{
if(j < 0 || j >= cols())
throw new ArithmeticException("Column was not a valid value " + j + " not in [0," + (cols()-1) + "]");
DenseVector c = new DenseVector(rows());
for(int i =0; i < rows(); i++)
c.set(i, get(i, j));
return c;
}
/**
* Obtains a vector that is backed by <i>this</i>, at very little memory
* cost. Mutations to this vector will alter the values stored in the
* matrix, and vice versa.
*
* @param j the column to obtain a view of
     * @return a vector backed by the specified column of the matrix
*/
public Vec getColumnView(final int j)
{
final Matrix M = this;
return new Vec()
{
private static final long serialVersionUID = 7107290189250645384L;
@Override
public int length()
{
return rows();
}
@Override
public double get(int index)
{
return M.get(index, j);
}
@Override
public void set(int index, double val)
{
M.set(index, j, val);
}
@Override
public boolean isSparse()
{
return M.isSparce();
}
@Override
public Vec clone()
{
if(M.isSparce())
return new SparseVector(this);
else
return new DenseVector(this);
}
@Override
public void setLength(int length)
{
throw new UnsupportedOperationException("Vector view can't not extend original matrix");
}
};
}
/**
* Creates a vector that has a copy of the values in row <i>i</i> of this
     * matrix. Altering it will not affect the values in <i>this</i> matrix.
* @param r the row to copy
* @return a clone of the row as a {@link Vec}
*/
public Vec getRow(int r)
{
if(r < 0 || r >= rows())
throw new ArithmeticException("Row was not a valid value " + r + " not in [0," + (rows()-1) + "]");
DenseVector c = new DenseVector(cols());
for(int j =0; j < cols(); j++)
c.set(j, get(r, j));
return c;
}
/**
* Obtains a vector that is backed by <i>this</i>, at very little memory
* cost. Mutations to this vector will alter the values stored in the
* matrix, and vice versa.
*
* @param r the row to obtain a view of
* @return a vector backed by the specified row of the matrix
*/
public Vec getRowView(final int r)
{
final Matrix M = this;
return new Vec()
{
private static final long serialVersionUID = 8484494698777822563L;
@Override
public int length()
{
return M.cols();
}
@Override
public double get(int index)
{
return M.get(r, index);
}
@Override
public void set(int index, double val)
{
M.set(r, index, val);
}
@Override
public boolean isSparse()
{
return M.isSparce();
}
@Override
public Vec clone()
{
if(M.isSparce())
return new SparseVector(this);
else
return new DenseVector(this);
}
@Override
public void setLength(int length)
{
throw new UnsupportedOperationException("Vector view can not extend original matrix");
}
};
}
@Override
public String toString()
{
StringBuilder sb = new StringBuilder(rows()*cols());
sb.append("[");
for(int i = 0; i < rows(); i++)
{
sb.append(get(i, 0));
for(int j = 1; j < cols(); j++)
{
sb.append(", ").append(get(i, j));
}
sb.append(";");
}
sb.append("]");
return sb.toString();
}
/**
* Convenience method that will return {@code true} only if the two input
* matrices have the exact same dimensions.
* @param A the first matrix
* @param B the second matrix
* @return {@code true} if they have the exact same dimensions,
* {@code false} otherwise.
*/
public static boolean sameDimensions(Matrix A, Matrix B)
{
return A.rows() == B.rows() && A.cols() == B.cols();
}
/**
* Convenience method that will return {@code true} only if the two input
* matrices have dimensions compatible for multiplying <i>A*B</i>
* @param A the first matrix
* @param B the second matrix
* @return {@code true} if they have dimensions allowing multiplication,
* {@code false} otherwise.
*/
public static boolean canMultiply(Matrix A, Matrix B)
{
return A.cols() == B.rows();
}
@Override
public boolean equals(Object obj)
{
if(obj == null || !(obj instanceof Matrix))
return false;
Matrix that = (Matrix) obj;
if(this.rows() != that.rows() || this.cols() != that.cols())
return false;
for(int i = 0; i < rows(); i++)
for(int j = 0; j < cols(); j++)
if(this.get(i, j) != that.get(i, j))
return false;
return true;
}
/**
* Performs the same as {@link #equals(java.lang.Object) }, but allows a
* leniency in the differences between matrix values. This is useful for
* when some amount of numerical error is expected
*
* @param obj the other matrix
* @param range the max acceptable difference between two cell values
* @return {@code true} if the difference between the values of each pair of
* matrix elements are less than or equal to <i>range</i>
*/
public boolean equals(Object obj, double range)
{
if(obj == null || !(obj instanceof Matrix))
return false;
Matrix that = (Matrix) obj;
if(this.rows() != that.rows() || this.cols() != that.cols())
return false;
for(int i = 0; i < rows(); i++)
for(int j = 0; j < cols(); j++)
if(Math.abs(this.get(i, j)-that.get(i, j)) > range)
return false;
return true;
}
/**
* Alters the current matrix so that all values are equal to zero.
*/
abstract public void zeroOut();
/**
     * Copies the values of this matrix into the other matrix of the same dimensions
* @param other the matrix to overwrite the values of
*/
public void copyTo(Matrix other)
{
if (this.rows() != other.rows() || this.cols() != other.cols())
throw new ArithmeticException("Matrices are not of the same dimension");
for(int i = 0; i < rows(); i++)
this.getRowView(i).copyTo(other.getRowView(i));
}
/**
* Alters row i of <i>this</i> matrix, such that
* <i>A[i,:] = A[i,:] + c*<b>b</b></i>
* @param i the index of the row to update
* @param c the scalar constant to multiply the vector by
* @param b the vector to add to the specified row
*/
public void updateRow(int i, double c, Vec b)
{
if(b.length() != this.cols())
throw new ArithmeticException("vector is not of the same column length");
if (b.isSparse())
for (IndexValue iv : b)
this.increment(i, iv.getIndex(), c * iv.getValue());
else
for (int j = 0; j < b.length(); j++)
this.increment(i, j, c * b.get(j));
}
/**
* Computes the Frobenius norm of the given matrix
* @return the Frobenius norm of the given matrix
*/
public double frobenius()
{
double f = 0;
for(int i = 0; i < this.rows(); i++)
for(IndexValue iv : this.getRowView(i))
f += Math.pow(iv.getValue(), 2);
return Math.sqrt(f+1e-15);
}
/**
* Alters the matrix <i>A</i> such that,
* <i>A = A + c * <b>x</b> * <b>y</b>'</i>
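     * <br><br>
     * For example (a small sketch with hypothetical values):
     * <pre>{@code
     * Matrix A = Matrix.eye(2);
     * Vec x = new DenseVector(new double[]{1.0, 2.0});
     * Vec y = new DenseVector(new double[]{3.0, 4.0});
     * Matrix.OuterProductUpdate(A, x, y, 1.0); // A is now [4, 4; 6, 9]
     * }</pre>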
*
* @param A the matrix to update
* @param x the first vector
* @param y the second vector
* @param c the scalar constant to multiply the outer product by
* @throws ArithmeticException if the vector dimensions are not compatible
* with the matrix <i>A</i>
*/
public static void OuterProductUpdate(Matrix A, Vec x, Vec y, double c)
{
if (x.length() != A.rows() || y.length() != A.cols())
throw new ArithmeticException("Matrix dimensions do not agree with outer product");
if (x.isSparse())
for (IndexValue iv : x)
A.updateRow(iv.getIndex(), iv.getValue() * c, y);
else
for (int i = 0; i < x.length(); i++)
{
double rowCosnt = c * x.get(i);
A.updateRow(i, rowCosnt, y);
}
}
/**
* Alters the matrix <i>A</i> such that,
* <i>A = A + c * <b>x</b> * <b>y</b>'</i>
*
* @param A the matrix to update
* @param x the first vector
* @param y the second vector
* @param c the scalar constant to multiply the outer product by
* @param threadpool the source of threads to do computation in parallel
*/
public static void OuterProductUpdate(final Matrix A, final Vec x, final Vec y, final double c, ExecutorService threadpool)
{
if(x.length() != A.rows() || y.length() != A.cols())
throw new ArithmeticException("Matrix dimensions do not agree with outer product");
if (x.isSparse())
{
final ModifiableCountDownLatch mcdl = new ModifiableCountDownLatch(1);
for (final IndexValue iv : x)
{
mcdl.countUp();
threadpool.submit(new Runnable()
{
@Override
public void run()
{
A.updateRow(iv.getIndex(), iv.getValue() * c, y);
mcdl.countDown();
}
});
}
mcdl.countDown();
try
{
mcdl.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(Matrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
else
{
final CountDownLatch latch = new CountDownLatch(LogicalCores);
for(int id = 0; id < LogicalCores; id++)
{
final int threadID = id;
threadpool.submit(new Runnable()
{
@Override
public void run()
{
for(int i = threadID; i < x.length(); i+=LogicalCores)
{
double rowCosnt = c*x.get(i);
A.updateRow(i, rowCosnt, y);
}
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(Matrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
}
/**
* Creates a new dense identity matrix with <i>k</i> rows and columns.
* @param k the number of rows / columns
* @return a new dense identity matrix <i>I<sub>k</sub></i>
*/
public static DenseMatrix eye(int k)
{
DenseMatrix eye = new DenseMatrix(k, k);
for(int i = 0; i < k; i++ )
eye.set(i, i, 1);
return eye;
}
/**
* Creates a new dense matrix filled with random values from
* {@link Random#nextDouble() }
*
* @param rows the number of rows for the matrix
* @param cols the number of columns for the matrix
* @param rand the source of randomness
* @return a new dense matrix full of random values
*/
public static DenseMatrix random(int rows, int cols, Random rand)
{
DenseMatrix m = new DenseMatrix(rows, cols);
for(int i = 0; i < rows; i++)
for(int j = 0; j < cols; j++)
m.set(i, j, rand.nextDouble());
return m;
}
/**
* Returns a new dense square matrix such that the main diagonal contains
* the values given in <tt>a</tt>
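     * <br><br>
     * For example (a small sketch):
     * <pre>{@code
     * Vec a = new DenseVector(new double[]{1.0, 2.0});
     * Matrix D = Matrix.diag(a); // a 2x2 matrix with 1.0 and 2.0 on the main diagonal
     * }</pre>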
* @param a the diagonal values of a matrix
     * @return the diagonal matrix represented by <i>a</i>
*/
public static Matrix diag(Vec a)
{
DenseMatrix A = new DenseMatrix(a.length(), a.length());
for(Iterator<IndexValue> iter = a.getNonZeroIterator(); iter.hasNext();)
{
IndexValue iv = iter.next();
A.set(iv.getIndex(), iv.getIndex(), iv.getValue());
}
return A;
}
/**
* Alters the matrix <i>A</i> so that it contains the result of <i>A</i>
* times a sparse matrix represented by only its diagonal values or
* <i>A = A*diag(<b>b</b>)</i>. This is equivalent to the code
* <code>
* A = A{@link #multiply(jsat.linear.Matrix) .multiply}
* ({@link #diag(jsat.linear.Vec) diag}(b))
* </code>
* @param A the square matrix to update
* @param b the diagonal value vector
*/
public static void diagMult(Matrix A, Vec b)
{
if(A.cols() != b.length())
throw new ArithmeticException("Could not multiply, matrix dimensions must agree");
for(int i = 0; i < A.rows(); i++)
RowColumnOps.multRow(A, i, b);
}
/**
     * Alters the matrix <i>A</i> so that it contains the result of a
     * sparse matrix, represented by only its diagonal values, times <i>A</i> or
     * <i>A = diag(<b>b</b>)*A</i>. This is equivalent to the code
     * <code>
     * A = {@link #diag(jsat.linear.Vec) diag}(b){@link #multiply(jsat.linear.Matrix) .multiply}(A)
     * </code>
* @param b the diagonal value vector
* @param A the square matrix to update
*/
public static void diagMult(Vec b, Matrix A)
{
if(A.rows() != b.length())
throw new ArithmeticException("Could not multiply, matrix dimensions must agree");
for(int i = 0; i < A.rows(); i++)
RowColumnOps.multRow(A, i, b.get(i));
}
/**
* Checks to see if the given input is approximately symmetric. Rounding
* errors may cause the computation of a matrix to come out non symmetric,
* where |a[i,h] - a[j, i]| < eps. Despite these errors, it may be
* preferred to treat the matrix as perfectly symmetric regardless.
*
* @param A the input matrix
* @param eps the maximum tolerable difference between two entries
* @return {@code true} if the matrix is approximately symmetric
*/
public static boolean isSymmetric(Matrix A, double eps)
{
if(!A.isSquare())
return false;
for(int i = 0; i < A.rows(); i++)
for(int j = i+1; j < A.cols(); j++)
if( Math.abs(A.get(i, j)-A.get(j, i)) > eps)
return false;
return true;
}
/**
* Checks to see if the given input is a perfectly symmetric matrix
* @param A the input matrix
* @return {@code true} if it is perfectly symmetric.
*/
public static boolean isSymmetric(Matrix A)
{
return isSymmetric(A, 0.0);
}
/**
* Creates a new square matrix that is a pascal matrix. The pascal matrix of
* size <i>n</i> is <i>n</i> by <i>n</i> and symmetric.
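     * <br><br>
     * For example:
     * <pre>{@code
     * Matrix P = Matrix.pascal(3); // [1, 1, 1; 1, 2, 3; 1, 3, 6]
     * }</pre>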
*
* @param size the number of rows and columns for the matrix
* @return a pascal matrix of the desired size
*/
public static Matrix pascal(int size)
{
if(size <= 0 )
throw new ArithmeticException();
DenseMatrix P = new DenseMatrix(size, size);
RowColumnOps.fillRow(P, 0, 0, size, 1.0);
RowColumnOps.fillCol(P, 0, 0, size, 1.0);
for(int i = 1; i < size; i++)
for(int j = 1; j < size; j++)
P.set(i, j, P.get(i-1, j) + P.get(i, j-1));
return P;
}
@Override
abstract public Matrix clone();
}
| 37,415 | 32.023831 | 128 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/MatrixOfVecs.java | package jsat.linear;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* This class provides a base mechanism to create a Matrix 'view' from a list of
* {@link Vec} objects. The vector objects will be used to back the rows of the
 * matrix, so changes to one are shown in the other. If the matrix is altered
* using {@link #changeSize(int, int) }, this may no longer be true. <br>
* Row oriented operations are implemented to defer to the base vector objects
* used.
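 * <br><br>
 * A minimal usage sketch (assuming two equal-length vectors {@code v0} and
 * {@code v1}):
 * <pre>{@code
 * MatrixOfVecs M = new MatrixOfVecs(v0, v1);
 * M.getRowView(0).mutableMultiply(2.0); // also scales v0, since the rows are views
 * }</pre>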
*
* @author Edward Raff
*/
public class MatrixOfVecs extends GenericMatrix
{
private static final long serialVersionUID = 6120353195388663462L;
private List<Vec> rows;
/**
* Creates a new Matrix of Vecs from the given array of Vec objects. All Vec
* objects contained should be of the same length.
*
* @param rows the rows of vecs to make this matrix.
*/
public MatrixOfVecs(Vec... rows)
{
this(Arrays.asList(rows));
}
/**
* Creates a new Matrix of Vecs from the given list of Vec objects. All Vec
* objects contained should be of the same length.
*
* @param rows the rows of vecs to make this matrix.
*/
public MatrixOfVecs(List<Vec> rows)
{
this.rows = new ArrayList<Vec>(rows);
int cols = rows.get(0).length();
for(Vec v : rows)
if(cols != v.length())
throw new IllegalArgumentException("Row vectors must all be of the same length");
}
/**
* Creates a new Matrix of Vecs of the desired size.
* @param rows the number of rows in the matrix
* @param cols the number of columns in the matrix
* @param sparse if {@code true} {@link SparseVector} objects will be used
* for the rows. Else {@link DenseVector} will be used.
*/
public MatrixOfVecs(int rows, int cols, boolean sparse)
{
this.rows = new ArrayList<Vec>(rows);
for(int i = 0; i < rows; i++)
this.rows.add(sparse ? new SparseVector(cols) : new DenseVector(cols));
}
@Override
protected Matrix getMatrixOfSameType(int rows, int cols)
{
return new MatrixOfVecs(rows, cols, isSparce());
}
@Override
public void changeSize(int newRows, int newCols)
{
if(newRows <= 0 || newCols <= 0)
throw new IllegalArgumentException("Rows and columns must be positive, new dimension of [" + newRows + "," + newCols + "] is invalid");
//change cols first, add new rows of the correct size after
if(newCols != cols())
{
for(int i = 0; i < rows(); i++)
{
Vec orig = rows.get(i);
Vec newV = orig.isSparse() ? new SparseVector(newCols) : new DenseVector(newCols);
if(newCols < orig.length())
new SubVector(0, newCols, orig).copyTo(newV);
else
orig.copyTo(new SubVector(0, orig.length(), newV));
rows.set(i, newV);
}
}
if(newRows < rows())
rows.subList(newRows, rows()).clear();
else if(newRows > rows())
while(rows.size() < newRows)
{
Vec newV = rows.get(rows.size()-1).clone();
newV.zeroOut();
rows.add(newV);
}
}
@Override
public double get(int i, int j)
{
if(i >= rows() || i < 0)
throw new IndexOutOfBoundsException("row " + i + " is not a valid index");
else if(j >= cols() || j < 0)
throw new IndexOutOfBoundsException("column " + j + " is not a valid index");
return rows.get(i).get(j);
}
@Override
public void set(int i, int j, double value)
{
if(i >= rows() || i < 0)
throw new IndexOutOfBoundsException("row " + i + " is not a valid index");
else if(j >= cols() || j < 0)
throw new IndexOutOfBoundsException("column " + j + " is not a valid index");
rows.get(i).set(j, value);
}
@Override
public void increment(int i, int j, double value)
{
if(i >= rows() || i < 0)
throw new IndexOutOfBoundsException("row " + i + " is not a valid index");
else if(j >= cols() || j < 0)
throw new IndexOutOfBoundsException("column " + j + " is not a valid index");
rows.get(i).increment(j, value);
}
@Override
public int rows()
{
return rows.size();
}
@Override
public int cols()
{
return rows.get(0).length();
}
@Override
public Vec getRowView(int r)
{
if(r >= rows() || r < 0)
throw new IndexOutOfBoundsException("row " + r + " is not a valid index");
return rows.get(r);
}
@Override
public void updateRow(int i, double c, Vec b)
{
rows.get(i).mutableAdd(c, b);
}
@Override
public void multiply(Vec b, double z, Vec c)
{
if(this.cols() != b.length())
throw new ArithmeticException("Matrix dimensions do not agree, [" + rows() +"," + cols() + "] x [" + b.length() + ",1]" );
if(this.rows() != c.length())
throw new ArithmeticException("Target vector dimension does not agree with matrix dimensions. Matrix has " + rows() + " rows but tagert has " + c.length());
for (int i = 0; i < rows(); i++)
{
double dot = this.rows.get(i).dot(b);
c.increment(i, dot * z);
}
}
@Override
public void mutableMultiply(double c)
{
for(Vec row : rows)
row.mutableMultiply(c);
}
@Override
public void mutableAdd(double c)
{
for(Vec row : rows)
row.mutableAdd(c);
}
@Override
public void zeroOut()
{
for(Vec row : rows)
row.zeroOut();
}
@Override
public MatrixOfVecs clone()
{
MatrixOfVecs clone = new MatrixOfVecs(rows);
for(int i = 0; i < clone.rows.size(); i++)
clone.rows.set(i, clone.rows.get(i).clone());
return clone;
}
@Override
public boolean isSparce()
{
for(Vec v : rows)//TODO probably keep this in a bool
if(v.isSparse())
return true;
return false;
}
}
| 6,362 | 29.014151 | 168 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/MatrixStatistics.java |
package jsat.linear;
import static java.lang.Math.pow;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.PriorityQueue;
import java.util.Random;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.distributions.ChiSquared;
import jsat.linear.distancemetrics.MahalanobisDistance;
import jsat.utils.IndexTable;
import jsat.utils.IntList;
import jsat.utils.IntSet;
import jsat.utils.ListUtils;
import jsat.utils.Tuple3;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* This class provides methods useful for statistical operations that involve matrices and vectors.
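 * <br><br>
 * A minimal usage sketch (assuming a {@code List<Vec> data} of equal-length
 * vectors):
 * <pre>{@code
 * Vec mean = MatrixStatistics.meanVector(data);
 * Matrix cov = MatrixStatistics.covarianceMatrix(mean, data);
 * }</pre>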
*
* @author Edward Raff
*/
public class MatrixStatistics
{
private MatrixStatistics()
{
}
/**
* Computes the mean of the given data set.
* @param <V> the vector type
* @param dataSet the list of vectors to compute the mean of
* @return the mean of the vectors
*/
public static <V extends Vec> Vec meanVector(List<V> dataSet)
{
if(dataSet.isEmpty())
throw new ArithmeticException("Can not compute the mean of zero data points");
Vec mean = new DenseVector(dataSet.get(0).length());
meanVector(mean, dataSet);
return mean;
}
/**
* Computes the weighted mean of the given data set.
*
* @param dataSet the dataset to compute the mean from
* @return the mean of the numeric vectors in the data set
*/
public static Vec meanVector(DataSet dataSet)
{
DenseVector dv = new DenseVector(dataSet.getNumNumericalVars());
meanVector(dv, dataSet);
return dv;
}
/**
* Computes the mean of the given data set.
*
* @param mean the zeroed out vector to store the mean in. Its contents will be altered
* @param dataSet the set of data points to compute the mean from
*/
public static <V extends Vec> void meanVector(Vec mean, List<V> dataSet)
{
if(dataSet.isEmpty())
throw new ArithmeticException("Can not compute the mean of zero data points");
else if(dataSet.get(0).length() != mean.length())
throw new ArithmeticException("Vector dimensions do not agree");
for (Vec x : dataSet)
mean.mutableAdd(x);
mean.mutableDivide(dataSet.size());
}
/**
* Computes the mean of the given data set.
*
     * @param <V> the vector type
* @param mean the zeroed out vector to store the mean in. Its contents will be altered
* @param dataSet the set of data points to compute the mean from
* @param subset the indices of the points in dataSet to take the mean of
*/
public static <V extends Vec> void meanVector(Vec mean, List<V> dataSet, Collection<Integer> subset)
{
if(dataSet.isEmpty())
throw new ArithmeticException("Can not compute the mean of zero data points");
else if(dataSet.get(0).length() != mean.length())
throw new ArithmeticException("Vector dimensions do not agree");
for(int i : subset)
mean.mutableAdd(dataSet.get(i));
mean.mutableDivide(subset.size());
}
/**
* Computes the weighted mean of the data set
* @param mean the zeroed out vector to store the mean in. Its contents will be altered
* @param dataSet the set of data points to compute the mean from
*/
public static void meanVector(Vec mean, DataSet dataSet)
{
if(dataSet.size() == 0)
throw new ArithmeticException("Can not compute the mean of zero data points");
double sumOfWeights = 0;
for(int i = 0; i < dataSet.size(); i++)
{
DataPoint dp = dataSet.getDataPoint(i);
double w = dataSet.getWeight(i);
sumOfWeights += w;
mean.mutableAdd(w, dp.getNumericalValues());
}
mean.mutableDivide(sumOfWeights);
}
public static <V extends Vec> Matrix covarianceMatrix(Vec mean, List<V> dataSet)
{
Matrix coMatrix = new DenseMatrix(mean.length(), mean.length());
covarianceMatrix(mean, coMatrix, dataSet);
return coMatrix;
}
public static <V extends Vec> void covarianceMatrix(Vec mean, Matrix covariance, List<V> dataSet)
{
if(!covariance.isSquare())
throw new ArithmeticException("Storage for covariance matrix must be square");
else if(covariance.rows() != mean.length())
throw new ArithmeticException("Covariance Matrix size and mean size do not agree");
else if(dataSet.isEmpty())
throw new ArithmeticException("No data points to compute covariance from");
else if(mean.length() != dataSet.get(0).length())
throw new ArithmeticException("Data vectors do not agree with mean and covariance matrix");
/**
* Covariance definition
*
* n
* ===== T
* \ / _\ / _\
* > |x - x| |x - x|
* / \ i / \ i /
* =====
* i = 1
*
*/
Vec scratch = new DenseVector(mean.length());
for (Vec x : dataSet)
{
x.copyTo(scratch);
scratch.mutableSubtract(mean);
Matrix.OuterProductUpdate(covariance, scratch, scratch, 1.0);
}
covariance.mutableMultiply(1.0 / (dataSet.size() - 1.0));
}
public static <V extends Vec> void covarianceMatrix(Vec mean, Matrix covariance, List<V> dataSet, Collection<Integer> subset)
{
if(!covariance.isSquare())
throw new ArithmeticException("Storage for covariance matrix must be square");
else if(covariance.rows() != mean.length())
throw new ArithmeticException("Covariance Matrix size and mean size do not agree");
else if(dataSet.isEmpty())
throw new ArithmeticException("No data points to compute covariance from");
else if(mean.length() != dataSet.get(0).length())
throw new ArithmeticException("Data vectors do not agree with mean and covariance matrix");
/**
* Covariance definition
*
* n
* ===== T
* \ / _\ / _\
* > |x - x| |x - x|
* / \ i / \ i /
* =====
* i = 1
*
*/
Vec scratch = new DenseVector(mean.length());
for(int i : subset)
{
dataSet.get(i).copyTo(scratch);
scratch.mutableSubtract(mean);
Matrix.OuterProductUpdate(covariance, scratch, scratch, 1.0);
}
covariance.mutableMultiply(1.0 / (subset.size() - 1.0));
}
/**
* Computes the weighted result for the covariance matrix of the given data set.
* If all weights have the same value, the result will come out equivalent to
* {@link #covarianceMatrix(jsat.linear.Vec, java.util.List) }
*
* @param mean the mean of the distribution.
* @param dataSet the set of data points that contain vectors
* @param covariance the zeroed matrix to store the result in. Its values will be altered.
*/
public static void covarianceMatrix(Vec mean, DataSet dataSet, Matrix covariance)
{
double sumOfWeights = 0.0, sumOfSquaredWeights = 0.0;
for(int i = 0; i < dataSet.size(); i++)
{
sumOfWeights += dataSet.getWeight(i);
sumOfSquaredWeights += Math.pow(dataSet.getWeight(i), 2);
}
covarianceMatrix(mean, dataSet, covariance, sumOfWeights, sumOfSquaredWeights);
}
/**
* Computes the weighted result for the covariance matrix of the given data set.
* If all weights have the same value, the result will come out equivalent to
* {@link #covarianceMatrix(jsat.linear.Vec, java.util.List) }
*
* @param mean the mean of the distribution.
* @param dataSet the set of data points that contain vectors
* @param covariance the zeroed matrix to store the result in. Its values will be altered.
* @param sumOfWeights the sum of each weight in <tt>dataSet</tt>
* @param sumOfSquaredWeights the sum of the squared weights in <tt>dataSet</tt>
*/
public static void covarianceMatrix(Vec mean, DataSet dataSet, Matrix covariance, double sumOfWeights, double sumOfSquaredWeights)
{
if (!covariance.isSquare())
throw new ArithmeticException("Storage for covariance matrix must be square");
else if (covariance.rows() != mean.length())
throw new ArithmeticException("Covariance Matrix size and mean size do not agree");
else if (dataSet.isEmpty())
throw new ArithmeticException("No data points to compute covariance from");
else if (mean.length() != dataSet.getNumNumericalVars())
throw new ArithmeticException("Data vectors do not agree with mean and covariance matrix");
        /**
         * Weighted definition of the covariance matrix:
         *
         * cov = [ S_w / (S_w^2 - S_w2) ] * sum_{i=1..n} w_i (x_i - mean)(x_i - mean)^T
         *
         * where S_w = sum_{i=1..n} w_i and S_w2 = sum_{i=1..n} w_i^2
         */
Vec scratch = new DenseVector(mean.length());
for (int i = 0; i < dataSet.size(); i++)
{
DataPoint dp = dataSet.getDataPoint(i);
Vec x = dp.getNumericalValues();
x.copyTo(scratch);
scratch.mutableSubtract(mean);
Matrix.OuterProductUpdate(covariance, scratch, scratch, dataSet.getWeight(i));
}
covariance.mutableMultiply(sumOfWeights / (Math.pow(sumOfWeights, 2) - sumOfSquaredWeights));
}
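    /*
     * Sanity check of the normalization above (a short derivation, not from the
     * original source): if every weight equals w, then sumOfWeights = n*w and
     * sumOfSquaredWeights = n*w^2, so the factor becomes
     * n*w / (n^2*w^2 - n*w^2) = 1 / (w*(n - 1)). Each outer product also
     * carries a factor of w, leaving 1/(n - 1) overall - the same
     * normalization as the unweighted covarianceMatrix methods.
     */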
/**
* Computes the weighted covariance matrix of the data set
* @param mean the mean of the data set
* @param dataSet the dataset to compute the covariance of
* @return the covariance matrix of the data set
*/
public static Matrix covarianceMatrix(Vec mean, DataSet dataSet)
{
Matrix covariance = new DenseMatrix(mean.length(), mean.length());
covarianceMatrix(mean, dataSet, covariance);
return covariance;
}
/**
     * Computes the weighted diagonal of the covariance matrix, which holds the
     * variances of the individual columns.
*
* @param means the already computed mean of the data set
* @param diag the zeroed out vector to store the diagonal in. Its contents
* will be altered
* @param dataset the data set to compute the covariance diagonal from
*/
public static void covarianceDiag(Vec means, Vec diag, DataSet dataset)
{
final int n = dataset.size();
final int d = dataset.getNumNumericalVars();
int[] nnzCounts = new int[d];
double sumOfWeights = 0;
for(int i = 0; i < n; i++)
{
DataPoint dp = dataset.getDataPoint(i);
double w = dataset.getWeight(i);
sumOfWeights += w;
Vec x = dataset.getDataPoint(i).getNumericalValues();
for(IndexValue iv : x)
{
int indx = iv.getIndex();
nnzCounts[indx]++;
diag.increment(indx, w*pow(iv.getValue()-means.get(indx), 2));
}
}
//add zero observations
for(int i = 0; i < nnzCounts.length; i++)
diag.increment(i, pow(means.get(i), 2)*(n-nnzCounts[i]) );
diag.mutableDivide(sumOfWeights);
}
/**
     * Computes the weighted diagonal of the covariance matrix, which holds the
     * variances of the individual columns.
*
* @param means the already computed mean of the data set
* @param dataset the data set to compute the covariance diagonal from
* @return the diagonal of the covariance matrix for the given data
*/
public static Vec covarianceDiag(Vec means, DataSet dataset)
{
DenseVector diag = new DenseVector(dataset.getNumNumericalVars());
covarianceDiag(means, diag, dataset);
return diag;
}
/**
     * Computes the diagonal of the covariance matrix, which holds the
     * variances of the individual columns.
*
* @param <V> the type of the vector
* @param means the already computed mean of the data set
* @param diag the zeroed out vector to store the diagonal in. Its contents
* will be altered
* @param dataset the data set to compute the covariance diagonal from
*/
public static <V extends Vec> void covarianceDiag(Vec means, Vec diag, List<V> dataset)
{
final int n = dataset.size();
final int d = dataset.get(0).length();
int[] nnzCounts = new int[d];
for(int i = 0; i < n; i++)
{
Vec x = dataset.get(i);
for(IndexValue iv : x)
{
int indx = iv.getIndex();
nnzCounts[indx]++;
diag.increment(indx, pow(iv.getValue()-means.get(indx), 2));
}
}
//add zero observations
for(int i = 0; i < nnzCounts.length; i++)
diag.increment(i, pow(means.get(i), 2)*(n-nnzCounts[i]) );
diag.mutableDivide(n);
}
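    /*
     * Example usage (a minimal sketch; "data" and "mean" are the hypothetical
     * objects from the earlier examples). The diagonal holds per-column
     * variances, so element-wise square roots give the standard deviations.
     *
     * Vec variances = new DenseVector(2);
     * covarianceDiag(mean, variances, data);
     * double stdDev0 = Math.sqrt(variances.get(0));
     */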
/**
     * Computes the diagonal of the covariance matrix, which holds the
     * variances of the individual columns.
*
* @param <V> the type of the vector
* @param means the already computed mean of the data set
* @param diag the zeroed out vector to store the diagonal in. Its contents
* will be altered
* @param dataset the data set to compute the covariance diagonal from
* @param subset the indices of the points in dataSet to take the mean of
*/
public static <V extends Vec> void covarianceDiag(Vec means, Vec diag, List<V> dataset, Collection<Integer> subset)
{
final int n = subset.size();
final int d = dataset.get(0).length();
int[] nnzCounts = new int[d];
for(int i : subset)
{
Vec x = dataset.get(i);
for(IndexValue iv : x)
{
int indx = iv.getIndex();
nnzCounts[indx]++;
diag.increment(indx, pow(iv.getValue()-means.get(indx), 2));
}
}
//add zero observations
for(int i = 0; i < nnzCounts.length; i++)
diag.increment(i, pow(means.get(i), 2)*(n-nnzCounts[i]) );
diag.mutableDivide(n);
}
/**
     * Computes the diagonal of the covariance matrix, which holds the
     * variances of the individual columns.
*
     * @param <V> the type of the vector
     * @param means the already computed mean of the data set
     * @param dataset the data set to compute the covariance diagonal from
     * @param subset the indices of the points in dataset to compute the diagonal over
* @return the diagonal of the covariance matrix for the given data
*/
public static <V extends Vec> Vec covarianceDiag(Vec means, List<V> dataset, List<Integer> subset)
{
final int d = dataset.get(0).length();
DenseVector diag = new DenseVector(d);
        covarianceDiag(means, diag, dataset, subset);
return diag;
}
/**
* This algorithm implements the FastMCD algorithm for robustly estimating
* the mean and covariance of a dataset. Computational complexity increases
* linearly with the sample size {@code n}, but cubically with the dimension
* size {@code d}.<br>
* <br>
     * See: Rousseeuw, P. J., & Van Driessen, K. (1999). A Fast Algorithm for
     * the Minimum Covariance Determinant Estimator. Technometrics, 41(3),
     * 212–223. http://doi.org/10.2307/1270566
     *
     * @param <V> the type of the vectors in the data set
* @param mean the location to store the estimated mean, values will be
* overwritten
* @param cov the location to store the estimated covariance, values will be
* overwritten
* @param dataset the set of data points to estimate the mean and covariance
* of
* @param parallel {@code true} if multiple cores should be used for
* estimation, {@code false} for single thread.
*/
public static <V extends Vec> void FastMCD(Vec mean, Matrix cov, List<V> dataset, boolean parallel)
{
final int N = dataset.size();
final int D = dataset.get(0).length();
final int h = (int) Math.ceil((N + D + 1) / 2.0);
mean.zeroOut();
cov.zeroOut();
if(h == N)
{
/*
             * 2. If h = n, then the MCD location estimate T is the average of
             * the whole dataset, and the MCD scatter estimate S is its
             * covariance matrix. Report these and stop.
*/
meanVector(mean, dataset);
covarianceMatrix(mean, cov, dataset);
return;
}
//Best results to store
double bestDet = Double.POSITIVE_INFINITY;
Vec bestMean = null;
Matrix bestCov = null;
if(N <= 600)
{
List<Tuple3<Double, Vec, Matrix>> top10 =
ParallelUtils.range(500, parallel)
.mapToObj(seed ->
{
Random rand = RandomUtil.getRandom(seed);
Vec subset_mean = mean.clone();
Matrix subset_cov = cov.clone();
IntList randOrder = ListUtils.range(0, N);
Collections.shuffle(randOrder, rand);
IntList h_prev = new IntList( randOrder.subList(0, D+1));
meanVector(subset_mean, dataset, h_prev);
covarianceMatrix(subset_mean, subset_cov, dataset, h_prev);
double det = 0;
                        //Run the C step 3 times: once for initialization from the p-set, plus the 2 runs after
for(int i = 0; i < 3; i++)
det = MCD_C_step(subset_mean, subset_cov, dataset, h_prev, h, false);
return new Tuple3<>(det, subset_mean, subset_cov);
}).sorted((o1, o2) -> Double.compare(o1.getX(), o2.getX()))
.limit(10).collect(Collectors.toList());//get the top 10 best
for(Tuple3<Double, Vec, Matrix> initSolution : top10)
{
double prevDev = initSolution.getX();
IntList h_prev = new IntList(h);//This will get populated by the call to C_Step below
Vec m = initSolution.getY();
Matrix c = initSolution.getZ();
for(int iter = 0; iter < 20; iter++)
{
double newDet = MCD_C_step(m, c, dataset, h_prev, h, parallel);
if(Math.abs(newDet-prevDev) < 1e-9)//close enough to equal
break;
prevDev = newDet;
}
if(prevDev < bestDet)
{
bestCov = c;
bestMean = m;
bestDet = prevDev;
}
}
//return best solution
}
else//larger set
{
            int numSplits;//How many sub-groups should we produce?
if(N >= 1500)
numSplits = 5;
else
numSplits = (int) Math.floor(N/300.0);
//Populate the sub-splits
IntList randOrderAll = ListUtils.range(0, N);
Collections.shuffle(randOrderAll, RandomUtil.getLocalRandom());
IntList[] splits = new IntList[numSplits];
for(int i = 0; i < numSplits; i++)
splits[i] = new IntList();
for(int i = 0; i < Math.min(1500, randOrderAll.size()); i++)
splits[i % splits.length].add(randOrderAll.get(i));
//smaller value of h for each sub set
int h_sub = (splits[0].size()*h)/N;
//run process to get top 10 results for each subset 100x times
List<Tuple3<Double, Vec, Matrix>> fiftySolutions =
Arrays.asList(splits).stream().flatMap(split ->
{
//Create a stream of the top 10 results for each subset
return ParallelUtils.range(100, parallel)
.mapToObj(seed ->
{
Random rand = RandomUtil.getRandom(seed);
Vec subset_mean = mean.clone();
Matrix subset_cov = cov.clone();
IntList randOrderSplit = new IntList(split);
Collections.shuffle(randOrderSplit, rand);
IntList h_prev = new IntList( randOrderSplit.subList(0, D+1));
meanVector(subset_mean, dataset, h_prev);
covarianceMatrix(subset_mean, subset_cov, dataset, h_prev);
double det = 0;
                            //Run the C step 3 times: once for initialization from the p-set, plus the 2 runs after
for(int i = 0; i < 3; i++)
det = MCD_C_step(subset_mean, subset_cov, dataset, h_prev, h_sub, false);
return new Tuple3<>(det, subset_mean, subset_cov);
}).sorted((o1, o2) -> Double.compare(o1.getX(), o2.getX()))
.limit(10);
}).collect(Collectors.toList());
//"in the merged set, repeat for each of the 50 solutions
IntSet splits_merged = new IntSet();
for(int i = 0; i < splits.length; i++)
splits_merged.addAll(splits[i]);
int h_merged = (splits_merged.size()*h)/N;
//do two more steps for each and keep the top 10
List<Tuple3<Double, Vec, Matrix>> top10 = fiftySolutions.parallelStream().map(tuple->
{
Vec subset_mean = tuple.getY();
Matrix subset_cov = tuple.getZ();
IntList h_prev = new IntList();
double det = 0;
                //Run the C step 3 times: once for initialization from the p-set, plus the 2 runs after
for(int i = 0; i < 3; i++)
det = MCD_C_step(subset_mean, subset_cov, dataset, h_prev, h_merged, false);
return new Tuple3<>(det, subset_mean, subset_cov);
}).sorted((o1, o2) -> Double.compare(o1.getX(), o2.getX()))
                        .limit(10)//now we have the top 10 solutions
.collect(Collectors.toList())
;
for(Tuple3<Double, Vec, Matrix> initSolution : top10)
{
double prevDev = initSolution.getX();
IntList h_prev = new IntList(h);//This will get populated by the call to C_Step below
Vec m = initSolution.getY();
Matrix c = initSolution.getZ();
for(int iter = 0; iter < 20; iter++)
{
double newDet = MCD_C_step(m, c, dataset, h_prev, h, parallel);
if(Math.abs(newDet-prevDev) < 1e-9)//close enough to equal
break;
prevDev = newDet;
}
if(prevDev < bestDet)
{
bestCov = c;
bestMean = m;
bestDet = prevDev;
}
}
}
//Now we have an initial good robust estimate of mean and cov
        //To compute correction terms, we need the distance of every point to the mean
Vec T_full = bestMean;
Matrix S_full = bestCov;
MahalanobisDistance md = new MahalanobisDistance();
        //compute the inverse of the covariance for use in the Mahalanobis distance
LUPDecomposition lup = new LUPDecomposition(S_full.clone());
//Set inverse matrix for dist
md.setInverseCovariance(lup.solve(Matrix.eye(S_full.cols())));
ChiSquared chi = new ChiSquared(S_full.cols());
double[] dist = new double[N];
ParallelUtils.run(parallel, N, (start, end)->
{
for(int i = start; i < end; i++)
dist[i] = md.dist(T_full, dataset.get(i));
});
IndexTable it = new IndexTable(dist);
double reScale = Math.pow(dist[it.index(N/2)],2)/chi.invCdf(0.5);
S_full.mutableMultiply(reScale);
        //apply the re-scale to the distances
for(int i = 0; i < N; i++)
dist[i] /= reScale;
        //Now we have the corrected covariance; the last step is to determine weights and compute the mean and cov one last time
double threshold = Math.sqrt(chi.invCdf(0.975));
//since weights are 0 or 1, just collect the 1s
List<Vec> finalSet = new ArrayList<>(N);
for(int i = 0; i < N; i++)
{
if(dist[i] <= threshold)
finalSet.add(dataset.get(i));
}
//FINAL estimate of mean and cov!
mean.zeroOut();
meanVector(mean, finalSet);
cov.zeroOut();
covarianceMatrix(mean, cov, finalSet);
}
/**
     * This helper function implements the C step of the Fast MCD algorithm used
* by {@link #FastMCD(jsat.linear.Vec, jsat.linear.Matrix, java.util.List, boolean)
* }.
*
* @param subset_mean current estimate of the mean
* @param subset_cov current estimate of the covariance
     * @param dataset the dataset to work with respect to
     * @param h_prev a location to store the new subset of used values
     * @param h the subset selection size
     * @param parallel {@code true} if multiple threads may be used
     * @return the determinant of the given covariance matrix
*/
protected static <V extends Vec> double MCD_C_step(Vec subset_mean, Matrix subset_cov, List<V> dataset, IntList h_prev, final int h, boolean parallel)
{
final int N = dataset.size();
MahalanobisDistance md = new MahalanobisDistance();
        //regularize the covariance to ensure it is invertible
for(int i = 0; i < subset_cov.rows(); i++)
subset_cov.increment(i, i, 1e-4);
LUPDecomposition lup = new LUPDecomposition(subset_cov.clone());
//Set inverse matrix for dist
md.setInverseCovariance(lup.solve(Matrix.eye(subset_cov.cols())));
double[] dists = new double[N];
for(int i = 0; i < N; i++)
dists[i] = md.dist(subset_mean, dataset.get(i));
//Create new sorted ordering
IndexTable it = new IndexTable(dists);
h_prev.clear();
for(int i = 0; i < h; i++)
h_prev.add(it.index(i));
        //Now let's estimate the new mean and cov. We return the old determinant for simplicity; worst case is an extra iteration.
meanVector(subset_mean, dataset, h_prev);
covarianceMatrix(subset_mean, subset_cov, dataset, h_prev);
return lup.det();
}
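    /*
     * Example usage of FastMCD (a minimal sketch; "data" is a hypothetical
     * List<Vec> of d-dimensional points):
     *
     * Vec robustMean = new DenseVector(d);
     * Matrix robustCov = new DenseMatrix(d, d);
     * FastMCD(robustMean, robustCov, data, true);
     * // robustMean and robustCov are now outlier-resistant estimates
     */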
}
| 27,972 | 37.797503 | 155 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/Poly2Vec.java |
package jsat.linear;
import java.util.Arrays;
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
* This class is used to create an implicit representation of the degree 2
* polynomial of an input vector, with an implicit bias term added so that the
* original vector values are present in the implicit vector. This means no
* extra memory will be allocated, and all values accessed will be re-computed
 * as needed. This works with sparse vectors, and works best with algorithms
 * that iterate over the nonzero values once.
* <br><br>
* Any change in the base vector will change the values in this vector. Because
* changing one value in the base effects multiple values in this one, altering
* this vector directly is not allowed.
* <br><br>
* If the base vector has {@code N} non zero values, then this vec will have
* O(N<sup>2</sup>) non zero values. (N+2)(N+1)/2 non zero values to be exact.
*
* @author Edward Raff
*/
public class Poly2Vec extends Vec
{
private static final long serialVersionUID = -5653680966558726340L;
private Vec base;
/**
     * This maps indices past the original coefficients (and bias term),
     * shifted to start from zero, to the appropriate value of the first
     * coefficient.
* This will be created lazily as needed. Call {@link #getReverseIndex() }
* to access this value
*/
private int[] reverseIndex;
public Poly2Vec(Vec base)
{
setBase(base);
}
/*
     * Some math needed for this class to make sense. Given an input we want the poly 2 form plus a bias term. So for
* (x + y + z)
* we want
* (1 + x+ y + z + x^2 + x y + x z + y^2 + y z + z^2)
*
* Then for an input of size N, the poly 2 version has length (N+2)(N+1)/2
*
* The bias term and maintaining the original is easy. So lets assume we
* only want to get the value for the x^2 term and after. IE: given a term x
* and y, give me the index of the coeff that contains their product. Let x
* start from 0 and let x^2 also start from zero, so we map from one space
* to the other.
*
* The exact index location, when x <= y, is then x N + y - x (x+1) / 2
*
*/
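    /*
     * Worked example (hypothetical values): for base = (x, y, z), N = 3, this
     * vector has length (3+2)(3+1)/2 = 10 and is laid out as
     * [1, x, y, z, x*x, x*y, x*z, y*y, y*z, z*z].
     * The product term x*z corresponds to the pair (x=0, y=2), and its index is
     * 1 + N + x*N + y - x*(x+1)/2 = 1 + 3 + 0 + 2 - 0 = 6, matching the layout
     * above.
     */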
    /**
     * Sets the base vector whose degree 2 polynomial this vector implicitly
     * represents.
     *
     * @param base the base vector
     */
public void setBase(Vec base)
{
this.base = base;
}
private int[] getReverseIndex()
{
if(reverseIndex != null && reverseIndex.length == base.length())
Arrays.fill(reverseIndex, 0);
else
reverseIndex = new int[base.length()];
reverseIndex[0] = base.length();
for(int i = 1; i < reverseIndex.length; i++)
reverseIndex[i] = reverseIndex[i-1]+(base.length()-i);
return reverseIndex;
}
@Override
public int length()
{
return (base.length()+2)*(base.length()+1)/2;
}
@Override
public int nnz()
{
return (base.nnz()+2)*(base.nnz()+1)/2;
}
@Override
public double get(int index)
{
if(index == 0)
return 1;
else if (index <= base.length())
return base.get(index-1);
else if (index >= length())
throw new IndexOutOfBoundsException("Vector is of length " + length() +", but index "+ index + " was requested");
int x = Arrays.binarySearch(getReverseIndex(), index-base.length()-1);
if(x < 0)
x = -x -1;
else
x++;
double xVal = base.get(x);
int y = (x*x+x)/2 + (index-base.length()-1) - base.length()*x;//the first term is safe b/c it will always be an even number before division
return xVal*base.get(y);
}
@Override
public void set(int index, double val)
{
throw new UnsupportedOperationException("Poly2Vec may not be altered");
}
@Override
public boolean isSparse()
{
return base.isSparse();
}
@Override
public Vec clone()
{
return new Poly2Vec(base.clone());
}
@Override
public Iterator<IndexValue> getNonZeroIterator(int start)
{
//First case: empty base vector
if (base.nnz() == 0)
return new Iterator<IndexValue>()
{
boolean hasNext = true;
@Override
public boolean hasNext()
{
return hasNext;
}
@Override
public IndexValue next()
{
if (!hasNext)
throw new NoSuchElementException("Iterator is empty");
hasNext = false;
return new IndexValue(0, 1.0);
}
@Override
public void remove()
{
                throw new UnsupportedOperationException("Not supported yet.");
}
};
//Else, general case
final int startStage;
final Iterator<IndexValue> startOuterIter, startInerIter;
boolean stage1Good = true;//fail occurs when the last index (or more) in the base vector is zero
if(start == 0)
{
startStage = 0;
startInerIter = startOuterIter = null;
}
else if(start <= base.length() && (stage1Good=base.getNonZeroIterator(start-1).hasNext()))
{
startStage = 1;
startOuterIter = base.getNonZeroIterator(start-1);
startInerIter = null;
}
else if(start >= length())
{
startStage = 3;
startInerIter = startOuterIter = null;
}
else//where do we start?
{
if (!stage1Good)
start = base.length() + 1;
Iterator<IndexValue> candidateOuterIter, candidateInerIter;
            start--;//decrement so we can increment at the top of each iteration (we don't actually want to change the first value in the loop)
do
{
start++;
int x = Arrays.binarySearch(getReverseIndex(), start - base.length() - 1);
if (x < 0)
x = -x - 1;
else
x++;
int y = (x * x + x) / 2 + (start - base.length() - 1) - base.length() * x;//the first term is safe b/c it will always be an even number before division
candidateOuterIter = base.getNonZeroIterator(x);
/*
                 * If the x coefficient is zero, we will jump to the next non
* zero x. This means y must change as well, so we will check if
* that has happened by grabbing another iterator to get the
* value. If this has happened, we know that y should be set to
* x's value
*/
int nextXIndex = candidateOuterIter.hasNext() ? base.getNonZeroIterator(x).next().getIndex() : -1;
                if(candidateOuterIter.hasNext() && nextXIndex > x)//x is at a zero, so we need the inner iter to go back to the "beginning"
candidateInerIter = base.getNonZeroIterator(nextXIndex);//next variable starts at val^2
else
candidateInerIter = base.getNonZeroIterator(y);
}
while ( (!candidateOuterIter.hasNext() || !candidateInerIter.hasNext()) && start < length());
if (candidateOuterIter.hasNext() && candidateInerIter.hasNext() && start < length())
{
startStage = 2;
startOuterIter = candidateOuterIter;
startInerIter = candidateInerIter;
}
else
return new Iterator<IndexValue>()
{
@Override
public boolean hasNext()
{
return false;
}
@Override
public IndexValue next()
{
throw new NoSuchElementException("Iterator is empty");
}
@Override
public void remove()
{
throw new UnsupportedOperationException("Not supported yet.");
}
};
}
return new Iterator<IndexValue>()
{
            int stage = startStage;//0 is for bias, 1 is for standard values, 2 is for combinations, 3 is for empty
Iterator<IndexValue> outerIter = startOuterIter, inerIter = startInerIter;
IndexValue curOuterVal = inerIter != null ? outerIter.next() : null;
IndexValue toReturn = new IndexValue(0, 0);
@Override
public boolean hasNext()
{
if(stage < 3)
return true;
return false;
}
@Override
public IndexValue next()
{
if(stage == 0)
{
stage++;
outerIter = base.getNonZeroIterator();//we know its non empty b/c of first case
return new IndexValue(0, 1.0);
}
else if (stage == 1)//outerIter must always have a next item if stage = 1
{
IndexValue iv = outerIter.next();
if (!outerIter.hasNext())
{
stage++;
outerIter = base.getNonZeroIterator();
curOuterVal = outerIter.next();
inerIter = base.getNonZeroIterator();
}
toReturn.setIndex(1+iv.getIndex());
toReturn.setValue(iv.getValue());
return toReturn;
}
else if(stage == 2)
{
IndexValue innerVal = inerIter.next();
int x = curOuterVal.getIndex();
int y = innerVal.getIndex();
int N = base.length();
toReturn.setIndex(1+N+x*N+y-x*(x+1)/2);
toReturn.setValue(curOuterVal.getValue()*innerVal.getValue());
if(!inerIter.hasNext())
{
if(!outerIter.hasNext())//we are out!
{
stage++;
outerIter = inerIter = null;
}
else//Still at least one more round!
{
curOuterVal = outerIter.next();
//new inner itter starts at idx^2
inerIter = base.getNonZeroIterator(curOuterVal.getIndex());
}
}
return toReturn;
}
else //stage >= 3
throw new NoSuchElementException("Iterator is empty");
}
@Override
public void remove()
{
throw new UnsupportedOperationException("Not supported yet.");
}
};
}
@Override
public void setLength(int length)
{
throw new UnsupportedOperationException("Poly2Vec can't extend original base vector");
}
}
| 11,703 | 34.466667 | 167 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/QRDecomposition.java |
package jsat.linear;
import java.io.Serializable;
import java.util.concurrent.ExecutorService;
import static java.lang.Math.*;
/**
 * Stores the QR decomposition of a matrix, which can be used to solve linear
 * systems involving the original matrix and to compute the absolute value of
 * its determinant.
 *
 * @author Edward Raff
 */
public class QRDecomposition implements Serializable
{
private static final long serialVersionUID = 7578073062361216223L;
private Matrix Q_T, R;
public QRDecomposition(Matrix Q, Matrix R)
{
if(!Q.isSquare())
throw new ArithmeticException("Q is always square, rectangular Q is invalid");
else if(Q.rows() != R.rows())
throw new ArithmeticException("Q and R do not agree");
this.Q_T = Q;
this.Q_T.mutableTranspose();
this.R = R;
}
public QRDecomposition(Matrix A)
{
Matrix[] qr = A.clone().qr();
Q_T = qr[0];
Q_T.mutableTranspose();
R = qr[1];
}
public QRDecomposition(Matrix A, ExecutorService threadpool)
{
Matrix[] qr = A.clone().qr(threadpool);
Q_T = qr[0];
Q_T.mutableTranspose();
R = qr[1];
}
/**
*
* @return the absolute value of the determinant of the original Matrix, abs(|A|)
*/
public double absDet()
{
if(!R.isSquare())
throw new ArithmeticException("Can only compute the determinant of a square matrix");
double absD = 1;
for(int i = 0; i < min(R.rows(), R.cols()); i++)
absD *= R.get(i, i);
return abs(absD);
}
public Vec solve(Vec b)
{
if(b.length() != R.rows())
throw new ArithmeticException("Matrix vector dimensions do not agree");
//A * x = b, we want x
//QR x = b
//R * x = Q' * b
Vec y = Q_T.multiply(b);
//Solve R * x = y using back substitution
Vec x = LUPDecomposition.backSub(R, y);
return x;
}
public Matrix solve(Matrix B)
{
//A * x = B, we want x
//QR x = b
//R * x = Q' * b
Matrix y = Q_T.multiply(B);
//Solve R * x = y using back substitution
Matrix x = LUPDecomposition.backSub(R, y);
return x;
}
public Matrix solve(Matrix B, ExecutorService threadpool)
{
//A * x = B, we want x
//QR x = b
//R * x = Q' * b
Matrix y = Q_T.multiply(B, threadpool);
//Solve R * x = y using back substitution
Matrix x = LUPDecomposition.backSub(R, y, threadpool);
return x;
}
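    /*
     * Example usage (a minimal sketch; the matrix and vector values are
     * hypothetical, and the double[][] DenseMatrix constructor is assumed):
     * solve the linear system A x = b.
     *
     * Matrix A = new DenseMatrix(new double[][]{{2, 1}, {1, 3}});
     * Vec b = new DenseVector(new double[]{3, 5});
     * Vec x = new QRDecomposition(A).solve(b);
     * // x is approximately (0.8, 1.4)
     */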
}
| 2,574 | 23.292453 | 97 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/RandomMatrix.java | package jsat.linear;
import java.util.Random;
import jsat.utils.random.RandomUtil;
/**
* Stores a Matrix full of random values in constant O(1) space by re-computing
* all matrix values on the fly as need. This allows memory reduction and use
* when it is necessary to use the matrix with a large sparse data set, where
* some matrix values may never even be used - or used very infrequently. <br>
* <br>
* This method is most useful when:
* <ul>
* <li>A random matrix can not be fit into main memory</li>
* <li>An in memory matrix with the model being trained would result in swapping
* , in which case the slower Random Matrix would be faster since it can avoid
* swapping</li>
* <li>A very large matrix must be synchronized across many threads or machines.
* The Random Matrix takes O(1) space and is thread safe</li>
* <li>Initializing a random dense matrix</li>
* <li>The accesses of the matrix is sparse enough that not all matrix values
* will get used, or used very infrequently</li>
* </ul>
* <br><br>
* Because the values of the random matrix are computed on the fly, the Random
* Matrix can not be altered. If attempted, an exception will be thrown.
* <br><br>
 * Because a Random Matrix can not be altered, it can not fulfill the contract
* of {@link #getMatrixOfSameType(int, int) }. For this reason, it will return a
* {@link DenseMatrix} so that use cases of the given method do not break, and
* can return new - altered - matrices.
*
* @author Edward Raff
*/
abstract public class RandomMatrix extends GenericMatrix
{
private static final long serialVersionUID = 3514801206898749257L;
/*
* Implementation note: It is assumed that the default random object is a
* PRNG with a single word / long of state. A higher quality PRNG cant be
* used if it requires too many words of state, as the initalization will
* then dominate the computation of every index.
*/
private int rows, cols;
private long seedMult;
/**
* Creates a new random matrix object
* @param rows the number of rows for the random matrix
* @param cols the number of columns for the random matrix
*/
public RandomMatrix(int rows, int cols)
{
this(rows, cols, RandomUtil.getRandom().nextLong());
}
/**
* Creates a new random matrix object
* @param rows the number of rows for the random matrix
* @param cols the number of columns for the random matrix
* @param seedMult a value to multiply with the seed used for each
* individual index. It should be a large value
*/
public RandomMatrix(int rows, int cols, long seedMult)
{
if(rows <= 0)
throw new IllegalArgumentException("rows must be positive, not " + rows);
if(cols <= 0)
throw new IllegalArgumentException("cols must be positive, not " + cols);
this.rows = rows;
this.cols = cols;
this.seedMult = seedMult;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public RandomMatrix(RandomMatrix toCopy)
{
this(toCopy.rows, toCopy.cols, toCopy.seedMult);
}
private ThreadLocal<Random> localRand = new ThreadLocal<Random>()
{
@Override
protected Random initialValue()
{
return new Random(1);//seed will get set by user
}
};
@Override
protected Matrix getMatrixOfSameType(int rows, int cols)
{
return new DenseMatrix(rows, cols);
}
/**
* Computes the value of an index given the already initialized
* {@link Random} object. This is called by the {@link #get(int, int) }
* method, and will make sure that the correct seed is set before calling
* this method.
*
* @param rand the PRNG to generate the index value from
* @return the value for a given index based on the given PRNG
*/
abstract protected double getVal(Random rand);
@Override
public double get(int i, int j)
{
long seed = (i+1)*(j+cols)*seedMult;
Random rand = localRand.get();
rand.setSeed(seed);
return getVal(rand);
}
@Override
public void set(int i, int j, double value)
{
throw new UnsupportedOperationException("Random Matrix can not be altered");
}
@Override
public int rows()
{
return rows;
}
@Override
public int cols()
{
return cols;
}
@Override
public boolean isSparce()
{
return false;
}
@Override
public boolean canBeMutated()
{
return false;
}
@Override
public void changeSize(int newRows, int newCols)
{
throw new UnsupportedOperationException("Random Matrix can not be altered");
}
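    /*
     * Example subclass (a minimal sketch): a matrix of standard normal draws,
     * recomputed on the fly from the per-index seeded PRNG. This assumes that
     * Matrix#clone() is the only other abstract method a concrete subclass
     * must supply; the protected copy constructor above serves that purpose.
     *
     * public class NormalRandomMatrix extends RandomMatrix
     * {
     * public NormalRandomMatrix(int rows, int cols) { super(rows, cols); }
     * private NormalRandomMatrix(NormalRandomMatrix toCopy) { super(toCopy); }
     *
     * protected double getVal(Random rand) { return rand.nextGaussian(); }
     *
     * public Matrix clone() { return new NormalRandomMatrix(this); }
     * }
     */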
}
| 4,833 | 29.594937 | 85 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/RandomVector.java | package jsat.linear;
import java.util.Random;
import jsat.utils.random.RandomUtil;
/**
* Stores a Vector full of random values in constant O(1) space by re-computing
* all matrix values on the fly as need. This allows memory reduction and use
* when it is necessary to use the matrix with a large sparse data set, where
* some matrix values may never even be used - or used very infrequently. <br>
* <br>
* Because the values of the random vector are computed on the fly, the Random
* Vector can not be altered. If attempted, an exception will be thrown.
*
* @author Edward Raff
*/
public abstract class RandomVector extends Vec
{
private static final long serialVersionUID = -1587968421978707875L;
/*
* Implementation note: It is assumed that the default random object is a
     * PRNG with a single word / long of state. A higher quality PRNG can't be
     * used if it requires too many words of state, as the initialization will
* then dominate the computation of every index.
*/
private int length;
private long seedMult;
/**
* Creates a new Random Vector object
* @param length the length of the vector
*/
public RandomVector(int length)
{
this(length, RandomUtil.getRandom().nextLong());
}
/**
* Creates a new Random Vector object
* @param length the length of the vector
* @param seedMult a value to multiply with the seed used for each
* individual index. It should be a large value
*/
public RandomVector(int length, long seedMult)
{
        if(length <= 0)
throw new IllegalArgumentException("Vector length must be positive, not " + length);
this.length = length;
this.seedMult = seedMult;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
protected RandomVector(RandomVector toCopy)
{
this(toCopy.length, toCopy.seedMult);
}
private ThreadLocal<Random> localRand = new ThreadLocal<Random>()
{
@Override
protected Random initialValue()
{
return new Random(1);//seed will get set by user
}
};
/**
* Computes the value of an index given the already initialized
* {@link Random} object. This is called by the {@link #get(int) }
* method, and will make sure that the correct seed is set before calling
* this method.
*
* @param rand the PRNG to generate the index value from
* @return the value for a given index based on the given PRNG
*/
abstract protected double getVal(Random rand);
@Override
public double get(int index)
{
long seed = (index+length)*seedMult;
Random rand = localRand.get();
rand.setSeed(seed);
return getVal(rand);
}
@Override
public void set(int index, double val)
{
throw new UnsupportedOperationException("RandomVector can not be altered");
}
@Override
public int length()
{
return length;
}
@Override
public void multiply(double c, Matrix A, Vec b)
{
if(this.length() != A.rows())
throw new ArithmeticException("Vector x Matrix dimensions do not agree [1," + this.length() + "] x [" + A.rows() + ", " + A.cols() + "]");
if(b.length() != A.cols())
throw new ArithmeticException("Destination vector is not the right size");
for(int i = 0; i < this.length(); i++)
{
double this_i = c*get(i);
for(int j = 0; j < A.cols(); j++)
b.increment(j, this_i*A.get(i, j));
}
}
@Override
public void mutableAdd(double c)
{
throw new UnsupportedOperationException("RandomVector can not be altered");
}
@Override
public void mutableAdd(double c, Vec b)
{
throw new UnsupportedOperationException("RandomVector can not be altered");
}
@Override
public void mutablePairwiseMultiply(Vec b)
{
throw new UnsupportedOperationException("RandomVector can not be altered");
}
@Override
public void mutableMultiply(double c)
{
throw new UnsupportedOperationException("RandomVector can not be altered");
}
@Override
public void mutablePairwiseDivide(Vec b)
{
throw new UnsupportedOperationException("RandomVector can not be altered");
}
@Override
public void mutableDivide(double c)
{
throw new UnsupportedOperationException("RandomVector can not be altered");
}
@Override
public Vec sortedCopy()
{
DenseVector dv = new DenseVector(this);
return dv.sortedCopy();
}
@Override
public double min()
{
double min = Double.MAX_VALUE;
for(IndexValue iv : this)
min = Math.min(iv.getValue(), min);
return min;
}
@Override
public double max()
{
double max = -Double.MAX_VALUE;
for(IndexValue iv : this)
            max = Math.max(iv.getValue(), max);
return max;
}
@Override
public boolean isSparse()
{
return false;
}
@Override
abstract public Vec clone();
@Override
public double dot(Vec v)
{
double dot = 0;
for (IndexValue iv : v)
dot += get(iv.getIndex()) * iv.getValue();
return dot;
}
@Override
public boolean canBeMutated()
{
return false;
}
@Override
public void setLength(int length)
{
this.length = length;
}
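    /*
     * Example subclass (a minimal sketch): a vector of uniform draws in [0, 1),
     * recomputed on the fly and never stored. The private copy constructor
     * delegates to the protected one above so that clone() preserves the seed.
     *
     * public class UniformRandomVector extends RandomVector
     * {
     * public UniformRandomVector(int length) { super(length); }
     * private UniformRandomVector(UniformRandomVector toCopy) { super(toCopy); }
     *
     * protected double getVal(Random rand) { return rand.nextDouble(); }
     *
     * public Vec clone() { return new UniformRandomVector(this); }
     * }
     */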
}
| 5,612 | 25.352113 | 150 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/RowColumnOps.java |
package jsat.linear;
/**
* A collection of mutable Row and Column operations that can be performed on matrices.
* @author Edward Raff
*/
public class RowColumnOps
{
/**
* Updates the values along the main diagonal of the matrix by adding a constant to them
* @param A the matrix to perform the update on
* @param start the first index of the diagonals to update (inclusive)
* @param to the last index of the diagonals to update (exclusive)
* @param c the constant to add to the diagonal
*/
public static void addDiag(Matrix A, int start, int to, double c)
{
for(int i = start; i < to; i++)
A.increment(i, i, c);
}
/**
* Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:]+ c
*
     * @param A the matrix to perform the update on
* @param i the row to update
* @param start the first index of the row to update from (inclusive)
* @param to the last index of the row to update (exclusive)
* @param c the constant to add to each element
*/
public static void addRow(Matrix A, int i, int start, int to, double c)
{
for(int j = start; j < to; j++)
A.increment(i, j, c);
}
/**
* Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:]+ c
*
* @param A the matrix to perform he update on
* @param i the row to update
* @param c the constant to add to each element
*/
public static void addRow(Matrix A, int i, double c)
{
addRow(A, i, 0, A.cols(), c);
}
/**
* Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:] * c
*
     * @param A the matrix to perform the update on
* @param i the row to update
* @param start the first index of the row to update from (inclusive)
* @param to the last index of the row to update (exclusive)
* @param c the constant to multiply each element by
*/
public static void multRow(Matrix A, int i, int start, int to, double c)
{
for(int j = start; j < to; j++)
A.set(i, j, A.get(i, j)*c);
}
/**
* Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:] * c
*
     * @param A the matrix to perform the update on
* @param i the row to update
* @param c the constant to multiply each element by
*/
public static void multRow(Matrix A, int i, double c)
{
multRow(A, i, 0, A.cols(), c);
}
/**
     * Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:] .* c
     * The Matrix <tt>A</tt> and vector <tt>c</tt> do not need to have the same dimensions,
     * so long as they both have indices in the given range.
     *
     * @param A the matrix to perform the update on
     * @param i the row to update
     * @param start the first index of the row to update from (inclusive)
     * @param to the last index of the row to update (exclusive)
     * @param c the vector of values to multiply the elements of <tt>A</tt> by
*/
public static void multRow(Matrix A, int i, int start, int to, Vec c)
{
for(int j = start; j < to; j++)
A.set(i, j, A.get(i, j)*c.get(j));
}
/**
     * Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:] .* c
     * The Matrix <tt>A</tt> and vector <tt>c</tt> do not need to have the same dimensions,
     * so long as they both have indices in the given range.
     *
     * @param A the matrix to perform the update on
     * @param i the row to update
     * @param c the vector of values to multiply the elements of <tt>A</tt> by
*/
public static void multRow(Matrix A, int i, Vec c)
{
if(A.cols() != c.length())
throw new ArithmeticException("Can not perform row update, length miss match " + A.cols() + " and " + c.length());
multRow(A, i, 0, c.length(), c);
}
/**
     * Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:] .* c
     * The Matrix <tt>A</tt> and array <tt>c</tt> do not need to have the same dimensions,
     * so long as they both have indices in the given range.
     *
     * @param A the matrix to perform the update on
     * @param i the row to update
     * @param start the first index of the row to update from (inclusive)
     * @param to the last index of the row to update (exclusive)
     * @param c the array of values to multiply the elements of <tt>A</tt> by
*/
public static void multRow(Matrix A, int i, int start, int to, double[] c)
{
for(int j = start; j < to; j++)
A.set(i, j, A.get(i, j)*c[j]);
}
/**
     * Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:] .* c
     *
     * @param A the matrix to perform the update on
     * @param i the row to update
     * @param c the array of values to multiply the elements of <tt>A</tt> by
*/
public static void multRow(Matrix A, int i, double[] c)
{
if(A.cols() != c.length)
throw new ArithmeticException("Can not perform row update, length miss match " + A.cols() + " and " + c.length);
multRow(A, i, 0, c.length, c);
}
/**
* Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:] / c
*
     * @param A the matrix to perform the update on
* @param i the row to update
* @param start the first index of the row to update from (inclusive)
* @param to the last index of the row to update (exclusive)
* @param c the constant to divide each element by
*/
public static void divRow(Matrix A, int i, int start, int to, double c)
{
for(int j = start; j < to; j++)
A.set(i, j, A.get(i, j)/c);
}
/**
* Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:] / c
*
     * @param A the matrix to perform the update on
* @param i the row to update
* @param c the constant to divide each element by
*/
public static void divRow(Matrix A, int i, double c)
{
divRow(A, i, 0, A.cols(), c);
}
/**
     * Updates the values of column <tt>j</tt> in the given matrix to be A[:,j] = A[:,j]+ c
     *
     * @param A the matrix to perform the update on
     * @param j the column to update
     * @param start the first row index to update from (inclusive)
     * @param to the last row index to update (exclusive)
     * @param c the constant to add to each element
*/
public static void addCol(Matrix A, int j, int start, int to, double c)
{
for(int i = start; i < to; i++)
A.increment(i, j, c);
}
/**
* Updates the values of column <tt>j</tt> in the given matrix to be A[:,j] = A[:,j]+ c
*
     * @param A the matrix to perform the update on
     * @param j the column to update
* @param c the constant to add to each element
*/
public static void addCol(Matrix A, int j, double c)
{
addCol(A, j, 0, A.rows(), c);
}
/**
* Updates the values of column <tt>j</tt> in the given matrix to be A[:,j] = A[:,j]* c
*
     * @param A the matrix to perform the update on
     * @param j the column to update
     * @param start the first row index to update from (inclusive)
     * @param to the last row index to update (exclusive)
* @param c the constant to multiply each element by
*/
public static void multCol(Matrix A, int j, int start, int to, double c)
{
for(int i = start; i < to; i++)
A.set(i, j, A.get(i, j)*c);
}
/**
* Updates the values of column <tt>j</tt> in the given matrix to be A[:,j] = A[:,j]* c
*
     * @param A the matrix to perform the update on
     * @param j the column to update
* @param c the constant to multiply each element by
*/
public static void multCol(Matrix A, int j, double c)
{
multCol(A, j, 0, A.rows(), c);
}
/**
* Updates the values of column <tt>j</tt> in the given matrix to be A[:,j] = A[:,j]/c
*
     * @param A the matrix to perform the update on
     * @param j the column to update
     * @param start the first row index to update from (inclusive)
     * @param to the last row index to update (exclusive)
* @param c the constant to divide each element by
*/
public static void divCol(Matrix A, int j, int start, int to, double c)
{
for(int i = start; i < to; i++)
A.set(i, j, A.get(i, j)/c);
}
/**
* Updates the values of column <tt>j</tt> in the given matrix to be A[:,j] = A[:,j]/c
*
     * @param A the matrix to perform the update on
     * @param j the column to update
* @param c the constant to divide each element by
*/
public static void divCol(Matrix A, int j, double c)
{
divCol(A, j, 0, A.rows(), c);
}
/**
     * Updates the values of column <tt>j</tt> in the given matrix to be A[i,j] = A[i,j]/c[i] for each row i.<br>
     * The Matrix <tt>A</tt> and vector <tt>c</tt> do not need to have the same dimensions,
     * so long as they both have indices in the given range.
     *
     * @param A the matrix to perform the update on
     * @param j the column to update
     * @param start the first row index to update from (inclusive)
     * @param to the last row index to update (exclusive)
     * @param c the vector of values to pairwise divide the elements of A by
*/
public static void divCol(Matrix A, int j, int start, int to, Vec c)
{
for(int i = start; i < to; i++)
A.set(i, j, A.get(i, j)/c.get(i));
}
/**
     * Updates the values of column <tt>j</tt> in the given matrix to be A[i,j] = A[i,j]/c[i] for each row i.<br>
     * The Matrix <tt>A</tt> and array <tt>c</tt> do not need to have the same dimensions, so long as they both have indices in the given range.
     *
     * @param A the matrix to perform the update on
     * @param j the column to update
     * @param start the first row index to update from (inclusive)
     * @param to the last row index to update (exclusive)
     * @param c the array of values to pairwise divide the elements of A by
*/
public static void divCol(Matrix A, int j, int start, int to, double[] c)
{
for(int i = start; i < to; i++)
A.set(i, j, A.get(i, j)/c[i]);
}
/**
* Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:]+c[:]*<tt>t</tt>.<br>
* The Matrix <tt>A</tt> and array <tt>c</tt> do not need to have the same dimensions, so long as they both have indices in the given range.
*
     * @param A the matrix to perform the update on
     * @param i the row to update
     * @param start the first column index to update from (inclusive)
     * @param to the last column index to update (exclusive)
* @param t the constant to multiply all elements of <tt>c</tt> by
* @param c the array of values to pairwise multiply by <tt>t</tt> before adding to the elements of A
*/
public static void addMultRow(Matrix A, int i, int start, int to, double t, double[] c)
{
for(int j = start; j < to; j++)
A.increment(i, j, c[j]*t);
}
/**
* Updates the values of row <tt>i</tt> in the given matrix to be A[i,:] = A[i,:]+c[:]*<tt>t</tt>.<br>
     * The Matrix <tt>A</tt> and vector <tt>c</tt> do not need to have the same dimensions, so long as they both have indices in the given range.
     *
     * @param A the matrix to perform the update on
     * @param i the row to update
     * @param start the first column index to update from (inclusive)
     * @param to the last column index to update (exclusive)
     * @param t the constant to multiply all elements of <tt>c</tt> by
     * @param c the vector of values to pairwise multiply by <tt>t</tt> before adding to the elements of A
*/
public static void addMultRow(Matrix A, int i, int start, int to, double t, Vec c)
{
for(int j = start; j < to; j++)
A.increment(i, j, c.get(j)*t);
}
/**
* Updates the values of column <tt>j</tt> in the given matrix to be A[:,j] = A[:,j]+c[:]*<tt>t</tt>.<br>
* The Matrix <tt>A</tt> and array <tt>c</tt> do not need to have the same dimensions, so long as they both have indices in the given range.
*
     * @param A the matrix to perform the update on
     * @param j the column to update
     * @param start the first row index to update from (inclusive)
     * @param to the last row index to update (exclusive)
* @param t the constant to multiply all elements of <tt>c</tt> by
* @param c the array of values to pairwise multiply by <tt>t</tt> before adding to the elements of A
*/
public static void addMultCol(Matrix A, int j, int start, int to, double t, double[] c)
{
for(int i = start; i < to; i++)
A.increment(i, j, c[i]*t);
}
/**
* Updates the values of column <tt>j</tt> in the given matrix to be A[:,j] = A[:,j]+c[:]*<tt>t</tt>.<br>
* The Matrix <tt>A</tt> and vector <tt>c</tt> do not need to have the same dimensions, so long as they both have indices in the given range.
*
     * @param A the matrix to perform the update on
     * @param j the column to update
     * @param start the first row index to update from (inclusive)
     * @param to the last row index to update (exclusive)
* @param t the constant to multiply all elements of <tt>c</tt> by
* @param c the vector of values to pairwise multiply by <tt>t</tt> before adding to the elements of A
*/
public static void addMultCol(Matrix A, int j, int start, int to, double t, Vec c)
{
for(int i = start; i < to; i++)
A.increment(i, j, c.get(i)*t);
}
/**
* Swaps the columns <tt>j</tt> and <tt>k</tt> in the given matrix.
     * @param A the matrix to perform the update on
* @param j the first column to swap
* @param k the second column to swap
* @param start the first row that will be included in the swap (inclusive)
* @param to the last row to be included in the swap (exclusive)
*/
public static void swapCol(Matrix A, int j, int k, int start, int to)
{
double t;
for(int i = start; i < to; i++)
{
t = A.get(i, j);
A.set(i, j, A.get(i, k));
A.set(i, k, t);
}
}
/**
* Swaps the columns <tt>j</tt> and <tt>k</tt> in the given matrix.
     * @param A the matrix to perform the update on
* @param j the first column to swap
* @param k the second column to swap
*/
public static void swapCol(Matrix A, int j, int k)
{
swapCol(A, j, k, 0, A.rows());
}
/**
* Swaps the rows <tt>j</tt> and <tt>k</tt> in the given matrix.
     * @param A the matrix to perform the update on
* @param j the first row to swap
* @param k the second row to swap
* @param start the first column that will be included in the swap (inclusive)
* @param to the last column to be included in the swap (exclusive)
*/
public static void swapRow(Matrix A, int j, int k, int start, int to)
{
double t;
for(int i = start; i < to; i++)
{
t = A.get(j, i);
A.set(j, i, A.get(k, i));
A.set(k, i, t);
}
}
/**
     * Swaps the rows <tt>j</tt> and <tt>k</tt> in the given matrix.
     * @param A the matrix to perform the update on
     * @param j the first row to swap
     * @param k the second row to swap
*/
public static void swapRow(Matrix A, int j, int k)
{
        swapRow(A, j, k, 0, A.cols());
}
/**
* Fills the values in a row of the matrix
* @param A the matrix in question
* @param i the row of the matrix
* @param from the first column index to fill (inclusive)
* @param to the last column index to fill (exclusive)
* @param val the value to fill into the matrix
*/
public static void fillRow(Matrix A, int i, int from, int to, double val)
{
for(int j = from; j < to; j++)
A.set(i, j, val);
}
/**
* Fills the values in a column of the matrix
* @param A the matrix in question
* @param j the column of the matrix
* @param from the first row index to fill (inclusive)
* @param to the last row index to fill (exclusive)
* @param val the value to fill into the matrix
*/
public static void fillCol(Matrix A, int j, int from, int to, double val)
{
for(int i = from; i < to; i++)
A.set(i, j, val);
}
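    /*
     * Example usage (hypothetical values): perform a row elimination step in
     * place, as done in Gaussian elimination. The double[][] DenseMatrix
     * constructor is assumed.
     *
     * Matrix A = new DenseMatrix(new double[][]{{2, 4}, {1, 5}});
     * double[] row0 = {2, 4};
     * // row 1 <- row 1 + row0 * (-1/2), eliminating the leading entry
     * RowColumnOps.addMultRow(A, 1, 0, 2, -0.5, row0);
     * // A is now {{2, 4}, {0, 3}}
     */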
}
| 16,931 | 37.657534 | 146 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/ScaledVector.java |
package jsat.linear;
import java.util.Collections;
import java.util.Iterator;
/**
* A wrapper for a vector that represents the vector multiplied by a scalar
* constant. This allows for using and altering the value multiplied by a
* constant factor quickly, especially when the multiplicative factor must be
 * changed often. Mutable operations will alter the underlying vector, and all
* operations will automatically be scaled on a per element basis as needed.
* <br><br>
* If a point is reached where the multiplicative constant will be infrequently
* modified relative to the use of the vector, it may be more efficient to use
* the original vector scaled appropriately. This can be done by first calling
* {@link #embedScale() } and then calling {@link #getBase() } .
* <br><br>
* When the multiplicative constant is set to zero, the underlying base vector
* is {@link Vec#zeroOut() zeroed out} and the constant reset to 1.
*
* @author Edward Raff
*/
public class ScaledVector extends Vec
{
private static final long serialVersionUID = 7357893957632067299L;
private double scale;
private Vec base;
/**
* Creates a new scaled vector
* @param scale the initial scaling constant
* @param base the vector to implicitly scale
*/
public ScaledVector(double scale, Vec base)
{
this.scale = scale;
this.base = base;
}
/**
* Creates a new scaled vector with a default scale of 1.
* @param vec the vector to implicitly scale
*/
public ScaledVector(Vec vec)
{
this(1.0, vec);
}
/**
* Returns the current scale in use
* @return the current scale in use
*/
public double getScale()
{
return scale;
}
/**
* Explicitly sets the current scale to the given value<br><br>
* NOTE: If the scale is set to zero, the underlying base vector will be
* zeroed out, and the scale set to 1.
* @param scale the new multiplicative constant to set for the scale
*/
public void setScale(double scale)
{
if(scale == 0.0)
zeroOut();
else
this.scale = scale;
}
/**
* Returns the base vector that is being scaled
* @return the base vector that is being scaled
*/
public Vec getBase()
{
return base;
}
/**
* Embeds the current scale factor into the base vector, so that the current
* scale factor can be set to 1.
*/
public void embedScale()
{
base.mutableMultiply(scale);
scale = 1;
}
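    /*
     * Example usage (hypothetical values): repeated rescaling costs O(1) until
     * the scale is embedded back into the base vector.
     *
     * Vec base = new DenseVector(new double[]{1, 2, 3});
     * ScaledVector v = new ScaledVector(base);
     * v.mutableMultiply(10.0); // only the scalar changes, O(1)
     * double y = v.get(1); // 20.0, computed on the fly
     * v.embedScale(); // base is now (10, 20, 30) and the scale is 1
     */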
@Override
public int length()
{
return base.length();
}
@Override
public double get(int index)
{
return base.get(index)*scale;
}
@Override
public void set(int index, double val)
{
base.set(index, val/scale);
}
@Override
public void multiply(double c, Matrix A, Vec b)
{
base.multiply(c/scale, A, b);
}
@Override
public void mutableAdd(double c)
{
base.mutableAdd(c/scale);
}
@Override
public void mutableAdd(double c, Vec b)
{
base.mutableAdd(c/scale, b);
}
@Override
public void mutablePairwiseMultiply(Vec b)
{
base.mutablePairwiseMultiply(b);
}
@Override
public void mutableMultiply(double c)
{
scale *= c;
if(scale == 0.0)
this.zeroOut();
else if(Math.abs(scale) < 1e-10 || Math.abs(scale) > 1e10)
embedScale();
}
@Override
public void mutablePairwiseDivide(Vec b)
{
base.mutablePairwiseDivide(b);
}
@Override
public void mutableDivide(double c)
{
scale /= c;
if(scale == 0.0)
this.zeroOut();
}
@Override
public Vec sortedCopy()
{
return new ScaledVector(scale, base.sortedCopy());
}
@Override
public double min()
{
if(scale >= 0)
return base.min()*scale;
else
return base.max()*scale;
}
@Override
public double max()
{
if(scale >= 0)
return base.max()*scale;
else
return base.min()*scale;
}
@Override
public double sum()
{
return scale*base.sum();
}
@Override
public double mean()
{
return scale*base.mean();
}
@Override
public double standardDeviation()
{
        return Math.abs(scale)*base.standardDeviation();//the standard deviation scales by |c|, so guard against a negative scale
}
@Override
public double median()
{
return scale*base.median();
}
@Override
public double skewness()
{
        return Math.signum(scale)*base.skewness();//skewness is invariant to positive scaling, but a negative scale flips its sign
}
@Override
public double kurtosis()
{
return base.kurtosis(); //kurtosis is scale invariant
}
@Override
public boolean isSparse()
{
return base.isSparse();
}
@Override
public Vec clone()
{
return new ScaledVector(scale, base.clone());
}
@Override
public double pNorm(double p)
{
        return Math.abs(scale)*base.pNorm(p);//norms are non-negative, so scale by |c|
}
@Override
public double dot(Vec v)
{
return scale*base.dot(v);
}
@Override
public double[] arrayCopy()
{
double[] copy = base.arrayCopy();
for(int i = 0; i < copy.length; i++)
copy[i] *= scale;
return copy;
}
@Override
public void zeroOut()
{
scale = 1.0;
base.zeroOut();
}
@Override
public int nnz()
{
return base.nnz();
}
@Override
public Iterator<IndexValue> getNonZeroIterator(int start)
{
if(scale == 0)
return Collections.EMPTY_LIST.iterator();
final Iterator<IndexValue> origIter = base.getNonZeroIterator(start);
Iterator<IndexValue> wrapedIter = new Iterator<IndexValue>()
{
@Override
public boolean hasNext()
{
return origIter.hasNext();
}
@Override
public IndexValue next()
{
IndexValue iv = origIter.next();
if( iv != null)
iv.setValue(scale*iv.getValue());
return iv;
}
@Override
public void remove()
{
origIter.remove();
}
};
return wrapedIter;
}
@Override
public void setLength(int length)
{
base.setLength(length);
}
}
| 6,599 | 20.568627 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/ShiftedVec.java |
package jsat.linear;
import static java.lang.Math.abs;
import static java.lang.Math.pow;
import java.util.Iterator;
/**
* A wrapper for a vector that represents the vector added with a scalar value.
* This allows for using and altering the value added by a
* constant factor quickly. Mutable operations will alter the underling vector,
* and all operations will automatically be adjusted on a per element basis as
* needed. The most significant performance difference is in representing a
* sparse input sifted by a constant factor. Note, that when using a sparse base
* vector - the ShiftedVec will always return {@code true} for
* {@link #isSparse() }, even if shifting makes all values non-zero
* <br>
*
* @author Edward Raff
*/
public class ShiftedVec extends Vec
{
private static final long serialVersionUID = -8318033099234181766L;
private Vec base;
private double shift;
/**
* Creates a new vector represented as <code><b>base</b>+shift</code>
* @param base the base vector to represent
* @param shift the scalar shift to add to the base vector
*/
public ShiftedVec(Vec base, double shift)
{
this.base = base;
this.shift = shift;
}
/**
* @return the base vector used by this object
*/
public Vec getBase()
{
return base;
}
/**
     * Directly alters the shift value used for this vector. The old shift is
* "forgotten" immediately.
* @param shift the new value to use for the additive scalar
*/
public void setShift(double shift)
{
this.shift = shift;
}
/**
*
* @return the additive scalar used to shift over the {@link #getBase()
* base} value
*/
public double getShift()
{
return shift;
}
/**
* Embeds the current shift scalar into the base vector, modifying it and
     * then setting the new shift to zero. This makes the base vector have the
* value represented by this ShiftedVec
*/
public void embedShift()
{
base.mutableAdd(shift);
shift = 0;
}
@Override
public int length()
{
return base.length();
}
@Override
public double get(int index)
{
return base.get(index)+shift;
}
@Override
public void set(int index, double val)
{
base.set(index, val-shift);
}
@Override
public void increment(int index, double val)
{
base.increment(index, val);
}
@Override
public void mutableAdd(Vec b)
{
if(b instanceof ShiftedVec)
{
ShiftedVec other = (ShiftedVec) b;
base.mutableAdd(other.base);
shift += other.shift;
}
else
base.mutableAdd(b);
}
@Override
public void mutableAdd(double c)
{
shift += c;
}
@Override
public void mutableAdd(double c, Vec b)
{
if(b instanceof ShiftedVec)
{
ShiftedVec other = (ShiftedVec) b;
base.mutableAdd(c, other.base);
shift += other.shift*c;
}
else
base.mutableAdd(c, b);
}
@Override
public void mutableDivide(double c)
{
base.mutableDivide(c);
shift /= c;
if(Double.isNaN(shift))
shift = 0;
}
@Override
public void mutableMultiply(double c)
{
base.mutableMultiply(c);
shift*=c;
}
@Override
public void mutablePairwiseDivide(Vec b)
{
//this would require multiple different shifts, so we have to fold it back into the base vector
base.mutableAdd(shift);
shift = 0;
base.mutablePairwiseDivide(b);
}
@Override
public void mutablePairwiseMultiply(Vec b)
{
//this would require multiple different shifts, so we have to fold it back into the base vector
base.mutableAdd(shift);
shift = 0;
base.mutablePairwiseMultiply(b);
}
// No real performance to gain by re-implementing matrix mul ops
// @Override
// public void multiply(double c, Matrix A, Vec b)
@Override
public double dot(Vec v)
{
if(v instanceof ShiftedVec)
{
ShiftedVec other = (ShiftedVec) v;
return this.base.dot(other.base) + other.base.sum()*this.shift + this.base.sum()*other.shift + this.length()*this.shift*other.shift;
}
return base.dot(v) + v.sum()*shift;
}
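    /* Why the expression above works (a sketch): with u = x + s*1 and
     * v = y + t*1, both of length n,
     * u.v = (x + s*1).(y + t*1) = x.y + t*sum(x) + s*sum(y) + n*s*t
     * so two shifted sparse vectors can be dotted without ever materializing
     * their dense shifted values.
     */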
@Override
public void zeroOut()
{
base.zeroOut();
shift = 0;
}
@Override
public double pNorm(double p)
{
if(!isSparse())
return super.pNorm(p);
//else sparse base, we can save some work
        //contribution of the zero values
double baseZeroContribs = pow(abs(shift), p)*(length()-base.nnz());
//+ contribution of non zero values
double baseNonZeroContribs = 0;
for(IndexValue iv : base)
baseNonZeroContribs += pow(abs(iv.getValue()+shift), p);
return pow(baseNonZeroContribs+baseZeroContribs, 1/p);
}
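    /* The sparse shortcut above evaluates, in O(nnz) time,
     * ||x + s||_p = ( (n - nnz)*|s|^p + sum over non-zeros of |x_i + s|^p )^(1/p)
     * since every implicit zero of the base contributes exactly |s|^p.
     */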
    // TODO: In the case of y also being a ShiftedVec and sparse, some significant performance could be saved.
// public double pNormDist(double p, Vec y)
@Override
public double mean()
{
return base.mean()+ shift;
}
@Override
public double variance()
{
return base.variance();
}
@Override
public double standardDeviation()
{
return base.standardDeviation();
}
@Override
public double kurtosis()
{
return base.kurtosis();
}
@Override
public double max()
{
return base.max()+shift;
}
@Override
public double min()
{
return base.min()+shift;
}
@Override
public double median()
{
return base.median()+shift;
}
@Override
public Iterator<IndexValue> getNonZeroIterator(final int start)
{
        if(!isSparse())//dense case, just add the shift and use the base implementation since it's going to do the exact same thing I would
return super.getNonZeroIterator(start);
final Iterator<IndexValue> baseIter = base.getNonZeroIterator(start);
if(shift == 0)//easy case, just use the base's iterator
return baseIter;
//ugly case, sparse vec with shifted values iterating over non zeros (which should generally be all of them)
final int lastIndx = length()-1;
return new Iterator<IndexValue>()
{
IndexValue nextBaseVal;
IndexValue nextVal;
IndexValue toRet = null;
//init
{
for(int effectiveStart = start; effectiveStart <= lastIndx; effectiveStart++)
{
nextBaseVal = baseIter.hasNext() ? baseIter.next() : null;
if (nextBaseVal != null && nextBaseVal.getIndex() == effectiveStart)
{
if (nextBaseVal.getValue() + shift == 0)
continue;//no starting on zero!
else
nextVal = new IndexValue(effectiveStart, nextBaseVal.getValue() + shift);
nextBaseVal = baseIter.hasNext() ? baseIter.next() : null;
}
else//was zero + shift
nextVal = new IndexValue(effectiveStart, shift);
toRet = new IndexValue(effectiveStart, shift);
break;
}
}
@Override
public boolean hasNext()
{
return nextVal != null;
}
@Override
public IndexValue next()
{
toRet.setIndex(nextVal.getIndex());
toRet.setValue(nextVal.getValue());
//loop to get next value b/c we may have to skip over zeros
do
{
nextVal.setIndex(nextVal.getIndex()+1);//pre-bump index
//prep next value
if(nextVal.getIndex() == lastIndx+1)
nextVal = null;//done
else
{
if(nextBaseVal != null && nextBaseVal.getIndex() == nextVal.getIndex())//there is a base non-zero next
{
nextVal.setValue(nextBaseVal.getValue()+shift);
nextBaseVal = baseIter.hasNext() ? baseIter.next() : null;
}
                    else//no base non-zero at this index, the shifted value is just the shift
{
nextVal.setValue(shift);
}
}
}
while(nextVal != null && nextVal.getValue() == 0);
return toRet;
}
@Override
public void remove()
{
throw new UnsupportedOperationException("Not supported.");
}
};
}
@Override
public boolean isSparse()
{
return base.isSparse();
}
@Override
public ShiftedVec clone()
{
return new ShiftedVec(base.clone(), shift);
}
@Override
public void setLength(int length)
{
throw new UnsupportedOperationException("Not supported yet.");
}
}
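/* Usage sketch (hypothetical values, not part of the original source):
 * Vec base = new SparseVector(1000); // all zeros
 * base.set(3, 2.0);
 * ShiftedVec v = new ShiftedVec(base, 0.5); // represents base + 0.5 in O(1)
 * double a = v.get(3); // 2.5
 * double b = v.get(4); // 0.5, no storage is used for this entry
 * v.mutableAdd(1.0); // only the shift changes, the sparse base is untouched
 */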
| 9,642 | 26.011204 | 144 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/SingularValueDecomposition.java | package jsat.linear;
import java.io.Serializable;
import java.util.Arrays;
import java.util.concurrent.ExecutorService;
import static java.lang.Math.*;
import static jsat.linear.RowColumnOps.*;
/**
* The Singular Value Decomposition (SVD) of a matrix A<sub>m,n </sub> = U<sub>m,n </sub> Σ<sub>n,n </sub> V<sup>T</sup><sub>n,n </sub>,
* where S is the diagonal matrix of the singular values sorted in descending order and are all non negative.
* <br> The SVD of a matrix has many practical uses, but is expensive to compute.
* <br><br>
* Implementation adapted from the Public Domain work of <a href="http://math.nist.gov/javanumerics/jama/"> JAMA: A Java Matrix Package</a>
* <br>
* <b>NOTE:</b> The current implementation has been revised and is now passing all test cases.
* However, it is still being tested. Use with awareness that it used to be bugged.
* Note left at revision 597
*
* @author Edward Raff
*/
public class SingularValueDecomposition implements Cloneable, Serializable
{
private static final long serialVersionUID = 1711766946748622002L;
private Matrix U, V;
/**
* Stores the diagonal values of the S matrix, and contains the bidiagonal values of A during initial steps.
*/
private double[] s;
/**
* Creates a new SVD of the matrix {@code A} such that A = U Σ V<sup>T</sup>. The matrix
* {@code A} will be modified and used as temp space when computing the SVD.
* @param A the matrix to create the SVD of
*/
public SingularValueDecomposition(Matrix A)
{
this(A, 100);
}
/**
* Creates a new SVD of the matrix {@code A} such that A = U Σ V<sup>T</sup>. The matrix
* {@code A} will be modified and used as temp space when computing the SVD.
* @param A the matrix to create the SVD of
* @param maxIterations the maximum number of iterations to perform per singular value till convergence.
*/
public SingularValueDecomposition(Matrix A, int maxIterations)
{
        //By doing this we get to keep the column major algorithm and get row major performance
final boolean transposedWord = A.rows() < A.cols();
Matrix AA = transposedWord ? new TransposeView(A) : A;
int m = AA.rows();
int n = AA.cols();
int nu = min(m, n);
U = new DenseMatrix(m, nu);
V = new DenseMatrix(n, n);
s = new double[min(m + 1, n)];
double[] e = new double[n];
double[] work = new double[m];
int nct = min(m - 1, n);
int nrt = max(0, min(n - 2, m));
bidiagonalize(nct, nrt, m, AA, n, e, work);
// Set up the final bidiagonal matrix or order p.
int p = min(n, m + 1);
if (nct < n)
s[nct] = AA.get(nct, nct);
if (m < p)
s[p - 1] = 0.0;
if (nrt + 1 < p)
e[nrt] = AA.get(nrt, p - 1);
e[p - 1] = 0.0;
generateU(nct, nu, m);
generateV(n, nrt, e, nu);
mainIterationLoop(p, e, n, m, maxIterations);
if(transposedWord)
{
/*
* A = U S V'
* A' = V S' U'
*/
Matrix tmp = V;
V = U;
U = tmp;
}
}
/**
* Sets the values for a SVD explicitly. This is not a copy constructor, and
* will hold the given values.
*
* @param U the U matrix of an SVD
* @param V the V matrix of an SVD
* @param s the singular sorted by magnitude of an SVD
*/
public SingularValueDecomposition(Matrix U, Matrix V, double[] s)
{
this.U = U;
this.V = V;
this.s = s;
}
private void bidiagonalize(int nct, int nrt, int m, Matrix A, int n, double[] e, double[] work)
{
for (int k = 0; k < max(nct, nrt); k++)
{
if (k < nct)
{
// Compute the transformation for the k-th column and
// place the k-th diagonal in s[k].
s[k] = 0;
for (int i = k; i < m; i++)
s[k] = hypot(s[k], A.get(i, k));
if (s[k] != 0.0)
{
if (A.get(k, k) < 0.0)
s[k] = -s[k];
divCol(A, k, k, m, s[k]);
A.increment(k, k, 1.0);
}
s[k] = -s[k];
}
for (int j = k + 1; j < n; j++)
{
if ((k < nct) & (s[k] != 0.0))
{
// Apply the transformation.
double t = 0;
for (int i = k; i < m; i++)
t += A.get(i, k) * A.get(i, j);
t = -t / A.get(k, k);
for(int i = k; i < m; i++)
A.increment(i, j, t*A.get(i, k));
}
// Place the k-th row of A into e for the
// subsequent calculation of the row transformation.
e[j] = A.get(k, j);
}
if (k < nct)
{
// Place the transformation in U for subsequent back
// multiplication.
for (int i = k; i < m; i++)
U.set(i, k, A.get(i, k));
}
if (k < nrt)
{
superDiagonalCreation(e, k, n, m, work, A);
}
}
}
private int sLength()
{
return min(U.rows(), V.rows());
}
private void superDiagonalCreation(double[] e, int k, int n, int m, double[] work, Matrix A)
{
// Compute the k-th row transformation and place the
// k-th super-diagonal in e[k].
e[k] = 0;
for (int i = k + 1; i < n; i++)
e[k] = Math.hypot(e[k], e[i]);
if (e[k] != 0.0)
{
if (e[k + 1] < 0.0)
e[k] = -e[k];
for (int i = k + 1; i < n; i++)
e[i] /= e[k];
e[k + 1] += 1.0;
}
e[k] = -e[k];
if ((k + 1 < m) & (e[k] != 0.0))
{
// Apply the transformation.
Arrays.fill(work, k+1, m, 0.0);
for (int j = k + 1; j < n; j++)
for (int i = k + 1; i < m; i++)
work[i] += e[j] * A.get(i, j);
for (int j = k + 1; j < n; j++)
{
double t = -e[j] / e[k + 1];
addMultCol(A, j, k+1, m, t, work);
}
}
// Place the transformation in V for subsequent
// back multiplication.
for (int i = k + 1; i < n; i++)
V.set(i, k, e[i]);
}
private void generateV(int n, int nrt, double[] e, int nu)
{
for (int k = n - 1; k >= 0; k--)
{
if ((k < nrt) & (e[k] != 0.0))
{
for (int j = k + 1; j < nu; j++)
{
double t = 0;
for (int i = k + 1; i < n; i++)
t += V.get(i, k) * V.get(i, j);
t = -t / V.get(k + 1, k);
for (int i = k + 1; i < n; i++)
V.increment(i, j, t * V.get(i, k));
}
}
for (int i = 0; i < n; i++)
V.set(i, k, 0.0);
V.set(k, k, 1.0);
}
}
private void generateU(int nct, int nu, int m)
{
for (int j = nct; j < nu; j++)
{
for (int i = 0; i < m; i++)
U.set(i, j, 0.0);
U.set(j, j, 1.0);
}
for (int k = nct - 1; k >= 0; k--)
{
if (s[k] != 0.0)
{
for (int j = k + 1; j < nu; j++)
{
double t = 0;
for (int i = k; i < m; i++)
t += U.get(i, k) * U.get(i, j);
t = -t / U.get(k, k);
for (int i = k; i < m; i++)
U.increment(i, j, t * U.get(i, k));
}
for (int i = k; i < m; i++)
U.set(i, k, -U.get(i, k));
U.set(k, k, 1.0 + U.get(k, k));
for (int i = 0; i < k - 1; i++)
U.set(i, k, 0.0);
}
else
{
for (int i = 0; i < m; i++)
U.set(i, k, 0.0);
U.set(k, k, 1.0);
}
}
}
private void mainIterationLoop(int p, double[] e, int n, int m, int maxIterations)
{
// Main iteration loop for the singular values.
int pp = p - 1;
int iter = 0;
double eps = pow(2.0, -52.0);
while (p > 0 && iter < maxIterations)
{
int k, kase;
// This section of the program inspects for
// negligible elements in the s and e arrays. On
// completion the variables kase and k are set as follows.
// kase = 1 if s(p) and e[k-1] are negligible and k<p
// kase = 2 if s(k) is negligible and k<p
// kase = 3 if e[k-1] is negligible, k<p, and
// s(k), ..., s(p) are not negligible (qr step).
// kase = 4 if e(p-1) is negligible (convergence).
for (k = p - 2; k >= -1; k--)
{
if (k == -1)
{
break;
}
if (abs(e[k]) <= eps * (abs(s[k]) + abs(s[k + 1])))
{
e[k] = 0.0;
break;
}
}
if (k == p - 2)
{
kase = 4;
}
else
{
int ks;
for (ks = p - 1; ks >= k; ks--)
{
if (ks == k)
break;
double t = (ks != p ? abs(e[ks]) : 0.)
+ (ks != k + 1 ? abs(e[ks - 1]) : 0.);
if (abs(s[ks]) <= eps * t)
{
s[ks] = 0.0;
break;
}
}
if (ks == k)
{
kase = 3;
}
else if (ks == p - 1)
{
kase = 1;
}
else
{
kase = 2;
k = ks;
}
}
k++;
// Perform the task indicated by kase.
switch (kase)
{
// Deflate negligible s(p).
case 1:
{
case1(e, p, k, n);
}
break;
// Split at negligible s(k).
case 2:
{
case2(e, k, p, m);
}
break;
case 3:
{
case3QRStep(p, e, k, n, m);
iter++;
}
break;
// Convergence.
case 4:
{
// Make the singular values positive.
if (s[k] <= 0.0)
{
s[k] = (s[k] < 0.0 ? -s[k] : 0.0);
multCol(V, k, 0, pp+1, -1);
}
// Order the singular values.
while (k < pp)
{
if (s[k] >= s[k + 1])
break;
double t = s[k];
s[k] = s[k + 1];
s[k + 1] = t;
if (k < n - 1)
swapCol(V, k, k+1, 0, n);
if (k < m - 1)
swapCol(U, k, k+1, 0, m);
k++;
}
iter = 0;
p--;
}
break;
}
}
}
private void case1(double[] e, int p, int k, int n)
{
double f = e[p - 2];
e[p - 2] = 0.0;
for (int j = p - 2; j >= k; j--)
{
double t = hypot(s[j], f);
double cs = s[j] / t;
double sn = f / t;
s[j] = t;
if (j != k)
{
f = -sn * e[j - 1];
e[j - 1] = cs * e[j - 1];
}
UVCase12Update(V, n, cs, j, sn, p);
}
}
private void case2(double[] e, int k, int p, int m)
{
double f = e[k - 1];
e[k - 1] = 0.0;
for (int j = k; j < p; j++)
{
double t = hypot(s[j], f);
double cs = s[j] / t;
double sn = f / t;
s[j] = t;
f = -sn * e[j];
e[j] = cs * e[j];
UVCase12Update(U, m, cs, j, sn, k);
}
}
private void UVCase12Update(Matrix UV, int m, double cs, int j, double sn, int k)
{
double t;
for (int i = 0; i < m; i++)
{
t = cs * UV.get(i, j) + sn * UV.get(i, k - 1);
UV.set(i, k - 1, -sn * UV.get(i, j) + cs * UV.get(i, k - 1));
UV.set(i, j, t);
}
}
private void case3QRStep(int p, double[] e, int k, int n, int m)
{
// Calculate the shift.
double scale = max(max(max(max(
abs(s[p - 1]), abs(s[p - 2])), abs(e[p - 2])),
abs(s[k])), abs(e[k]));
double sp = s[p - 1] / scale;
double spm1 = s[p - 2] / scale;
double epm1 = e[p - 2] / scale;
double sk = s[k] / scale;
double ek = e[k] / scale;
double b = ((spm1 + sp) * (spm1 - sp) + epm1 * epm1) / 2.0;
double c = (sp * epm1) * (sp * epm1);
double shift = 0.0;
if ((b != 0.0) | (c != 0.0))
{
shift = sqrt(b * b + c);
if (b < 0.0)
{
shift = -shift;
}
shift = c / (b + shift);
}
double f = (sk + sp) * (sk - sp) + shift;
double g = sk * ek;
// Chase zeros.
for (int j = k; j < p - 1; j++)
{
double t = hypot(f, g);
double cs = f / t;
double sn = g / t;
if (j != k)
e[j - 1] = t;
f = cs * s[j] + sn * e[j];
e[j] = cs * e[j] - sn * s[j];
g = sn * s[j + 1];
s[j + 1] = cs * s[j + 1];
UVCase3Update(V, n, cs, j, sn);
t = hypot(f, g);
cs = f / t;
sn = g / t;
s[j] = t;
f = cs * e[j] + sn * s[j + 1];
s[j + 1] = -sn * e[j] + cs * s[j + 1];
g = sn * e[j + 1];
e[j + 1] = cs * e[j + 1];
if (j < m - 1)
{
UVCase3Update(U, m, cs, j, sn);
}
}
e[p - 2] = f;
}
private void UVCase3Update(Matrix UV, int m, double cs, int j, double sn)
{
double t;
for (int i = 0; i < m; i++)
{
t = cs * UV.get(i, j) + sn * UV.get(i, j + 1);
UV.set(i, j + 1, -sn * UV.get(i, j) + cs * UV.get(i, j + 1));
UV.set(i, j, t);
}
}
/**
* Returns the backing matrix U of the SVD. Do not alter this matrix.
* @return the matrix U of the SVD
*/
public Matrix getU()
{
return U;
}
/**
     * Returns the backing matrix V of the SVD. Do not alter this matrix.
* @return the matrix V of the SVD
*/
public Matrix getV()
{
return V;
}
/**
     * Returns a copy of the sorted array of the singular values, including the near zero ones.
* @return a copy of the sorted array of the singular values, including the near zero ones.
*/
public double[] getSingularValues()
{
return Arrays.copyOf(s, sLength());
}
/**
* Returns the diagonal matrix S such that the SVD product results in the original matrix. The diagonal contains the singular values.
* @return a dense diagonal matrix containing the singular values
*/
public Matrix getS()
{
Matrix DS = new DenseMatrix(U.rows(), V.rows());
for(int i = 0; i < sLength(); i++)
DS.set(i, i, s[i]);
return DS;
}
/**
* Returns the 2 norm of the matrix, which is the maximal singular value.
* @return the 2 norm of the matrix
*/
public double getNorm2()
{
return s[0];
}
/**
* Returns the condition number of the matrix. The condition number is a positive measure of the numerical
* instability of the matrix. The larger the value, the less stable the matrix. For singular matrices,
* the result is {@link Double#POSITIVE_INFINITY}.
* @return the condition number of the matrix
*/
public double getCondition()
{
return getNorm2()/s[sLength()-1];
}
private double getDefaultTolerance()
{
return max(U.rows(), V.rows())*(nextUp(getNorm2())-getNorm2());
}
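    /* Rationale (a sketch): nextUp(x) - x is one unit in the last place of x,
     * so the default tolerance is roughly max(m, n) * ulp(sigma_max) -- the
     * standard machine-precision cutoff for deciding which singular values
     * should be treated as zero.
     */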
/**
* Returns the numerical rank of the matrix. Near zero values will be ignored.
* @return the rank of the matrix
*/
public int getRank()
{
return getRank(getDefaultTolerance());
}
/**
* Indicates whether or not the input matrix was of full rank, full
* rank matrices are more numerically stable.
*
     * @return <tt>true</tt> if the matrix was of full rank
*/
public boolean isFullRank()
{
return getRank() == sLength();
}
/**
     * Returns the numerical rank of the matrix. Values <= <tt>tol</tt> will be ignored.
     * @param tol the cut off for singular values
* @return the rank of the matrix
*/
public int getRank(double tol)
{
for(int i = 0; i < sLength(); i++)
if(s[i] <= tol)
return i;
return sLength();
}
/**
* Returns an array containing the inverse singular values. Near zero values are converted to zero.
* @return an array containing the inverse singular values
*/
public double[] getInverseSingularValues()
{
return getInverseSingularValues(getDefaultTolerance());
}
/**
* Returns an array containing the inverse singular values. Values that are <= <tt>tol</tt> are converted to zero.
     * @param tol the cut off for singular values
* @return an array containing the inverse singular values
*/
public double[] getInverseSingularValues(double tol)
{
double[] sInv = Arrays.copyOf(s, sLength());
for(int i = 0; i < sInv.length; i++)
if(sInv[i] > tol)
sInv[i] = 1.0/sInv[i];
else
sInv[i] = 0;
return sInv;
}
/**
* Returns the Moore–Penrose pseudo inverse of the matrix. The pseudo inverse for a matrix is unique. If a matrix
* is non singular, the pseudo inverse is the inverse.
*
* @return the pseudo inverse of the matrix
*/
public Matrix getPseudoInverse()
{
return getPseudoInverse(getDefaultTolerance());
}
/**
* Returns the Moore–Penrose pseudo inverse of the matrix. The pseudo inverse for a matrix is unique. If a matrix
* is non singular, the pseudo inverse is the inverse.
*
* @param tol the tolerance for singular values to ignore
* @return the pseudo inverse of the matrix
*/
public Matrix getPseudoInverse(double tol)
{
Matrix UT = U.transpose();
Matrix.diagMult(DenseVector.toDenseVec(getInverseSingularValues(tol)), UT);
return V.multiply(UT);
}
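    /* The computation above follows directly from the decomposition (a sketch):
     * A = U S V' implies A+ = V S+ U', where S+ inverts every singular value
     * larger than tol and zeros out the rest -- exactly what
     * getInverseSingularValues(tol) produces.
     */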
/**
     * Computes the pseudo determinant of the matrix, which corresponds to the absolute value of
* the determinant of the full rank square sub matrix that contains all non zero singular values.
*
* @return the pseudo determinant.
*/
public double getPseudoDet()
{
return getPseudoDet(getDefaultTolerance());
}
/**
     * Computes the pseudo determinant of the matrix, which corresponds to the absolute value of
     * the determinant of the full rank square sub matrix that contains all singular values > <tt>tol</tt>.
     *
     * @param tol the cut off for singular values
* @return the pseudo determinant
*/
public double getPseudoDet(double tol)
{
double det = 1;
for (double d : s)
if (d <= tol)
break;
else
det *= d;
return det;
}
/**
* Computes the absolute value of the determinant for the full matrix.
* @return {@link Math#abs(double) abs}(determinant)
*/
public double absDet()
{
double absDet = 1.0;
for(double d : s)
absDet *= d;
return absDet;
}
/**
* Solves the linear system of equations for A x = b by using the equation<br><code>
* x = A<sup>-1</sup> b = V S<sup>-1</sup> U<sup>T</sup> b </code>
* <br>
* When A is not full rank, this results in a more numerically stable approximation that minimizes the least squares error.
*
* @param b the vector to solve for
* @return the vector that gives the least squares solution to A x = b
*/
public Vec solve(Vec b)
{
Vec x = U.transposeMultiply(1.0, b);
x.mutablePairwiseMultiply(DenseVector.toDenseVec(getInverseSingularValues()));
return V.multiply(x);
}
/**
* Solves the linear system of equations for A x = B by using the equation<br><code>
* x = A<sup>-1</sup> B = V S<sup>-1</sup> U<sup>T</sup> B </code>
* <br>
* When A is not full rank, this results in a more numerically stable approximation that minimizes the least squares error.
*
* @param B the matrix to solve for
* @return the matrix that gives the least squares solution to A x = B
*/
public Matrix solve(Matrix B)
{
Matrix x = U.transposeMultiply(B);
Matrix.diagMult(DenseVector.toDenseVec(getInverseSingularValues()), x);
return V.multiply(x);
}
/**
* Solves the linear system of equations for A x = B by using the equation<br><code>
* x = A<sup>-1</sup> B = V S<sup>-1</sup> U<sup>T</sup> B </code>
* <br>
* When A is not full rank, this results in a more numerically stable approximation that minimizes the least squares error.
*
* @param b the matrix to solve for
     * @param threadpool the source of threads to use for the computation
* @return the matrix that gives the least squares solution to A x = B
*/
public Matrix solve(Matrix b, ExecutorService threadpool)
{
Matrix x = U.transposeMultiply(b, threadpool);
Matrix.diagMult(DenseVector.toDenseVec(getInverseSingularValues()), x);
return V.multiply(x, threadpool);
}
@Override
public SingularValueDecomposition clone()
{
return new SingularValueDecomposition(U.clone(), V.clone(), Arrays.copyOf(s, s.length));
}
}
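/* Usage sketch (hypothetical data, not part of the original source; clone()
 * protects A, since the constructor uses its argument as scratch space):
 *
 * SingularValueDecomposition svd = new SingularValueDecomposition(A.clone());
 * Matrix U = svd.getU(), S = svd.getS(), V = svd.getV(); // A = U S V'
 * Vec x = svd.solve(b); // least squares solution to A x = b
 */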
| 23,684 | 29.133588 | 143 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/SparseMatrix.java | package jsat.linear;
import java.util.Arrays;
import java.util.Iterator;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.utils.SystemInfo;
/**
* Creates a new Sparse Matrix where each row is backed by a sparse vector.
* <br><br>
* This implementation does not support the {@link #qr() QR} or {@link #lup() }
* decompositions.
* <br>
* {@link #transposeMultiply(jsat.linear.Matrix, jsat.linear.Matrix, java.util.concurrent.ExecutorService) } currently does not use multiple cores.
*
* @author Edward Raff
*/
public class SparseMatrix extends Matrix
{
private static final long serialVersionUID = -4087445771022578544L;
private SparseVector[] rows;
/**
* Creates a new sparse matrix
* @param rows the number of rows for the matrix
* @param cols the number of columns for the matrix
* @param rowCapacity the initial capacity for non zero values for each row
*/
public SparseMatrix(int rows, int cols, int rowCapacity)
{
this.rows = new SparseVector[rows];
for(int i = 0; i < rows; i++)
this.rows[i] = new SparseVector(cols, rowCapacity);
}
/**
* Creates a new Sparse Matrix backed by the given array of SpareVectors.
* Altering the array of any object in it will also alter the this matrix.
*
* @param rows the array to back this SparseMatrix
*/
public SparseMatrix(SparseVector[] rows)
{
this.rows = rows;
for(int i = 0; i < rows.length; i++)
if(rows[i].length() != rows[0].length())
throw new IllegalArgumentException("Row " + i + " has " + rows[i].length() + " columns instead of " + rows[0].length());
}
/**
* Creates a new sparse matrix
* @param rows the number of rows for the matrix
* @param cols the number of columns for the matrix
*/
public SparseMatrix(int rows, int cols)
{
this.rows = new SparseVector[rows];
for(int i = 0; i < rows; i++)
this.rows[i] = new SparseVector(cols);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
protected SparseMatrix(SparseMatrix toCopy)
{
this.rows = new SparseVector[toCopy.rows.length];
for(int i = 0; i < rows.length; i++)
this.rows[i] = toCopy.rows[i].clone();
}
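    /* Construction sketch (hypothetical values, not part of the original source):
     * SparseMatrix M = new SparseMatrix(10000, 10000, 4); // room for ~4 non-zeros per row
     * M.set(3, 7, 2.5); // only non-zero entries consume memory
     * M.increment(3, 7, 0.5); // entry (3, 7) is now 3.0
     */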
@Override
public void mutableAdd(double c, Matrix B)
{
if(!Matrix.sameDimensions(this, B))
throw new ArithmeticException("Matrices must be the same dimension to be added");
for( int i = 0; i < rows.length; i++)
rows[i].mutableAdd(c, B.getRowView(i));
}
@Override
public void mutableAdd(final double c, final Matrix B, ExecutorService threadPool)
{
if(!Matrix.sameDimensions(this, B))
throw new ArithmeticException("Matrices must be the same dimension to be added");
final CountDownLatch latch = new CountDownLatch(rows.length);
for (int i = 0; i < rows.length; i++)
{
final int ii = i;
threadPool.submit(new Runnable()
{
@Override
public void run()
{
rows[ii].mutableAdd(c, B.getRowView(ii));
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(SparseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public void mutableAdd(double c)
{
for(SparseVector row : rows)
row.mutableAdd(c);
}
@Override
public void mutableAdd(final double c, ExecutorService threadPool)
{
final CountDownLatch latch = new CountDownLatch(rows.length);
for(final SparseVector row : rows)
{
threadPool.submit(new Runnable()
{
@Override
public void run()
{
row.mutableAdd(c);
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(SparseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public void multiply(Vec b, double z, Vec c)
{
if(this.cols() != b.length())
throw new ArithmeticException("Matrix dimensions do not agree, [" + rows() +"," + cols() + "] x [" + b.length() + ",1]" );
if(this.rows() != c.length())
throw new ArithmeticException("Target vector dimension does not agree with matrix dimensions. Matrix has " + rows() + " rows but tagert has " + c.length());
for(int i = 0; i < rows(); i++)
{
SparseVector row = rows[i];
c.increment(i, row.dot(b)*z);
}
}
@Override
public void multiply(Matrix B, Matrix C)
{
if(!canMultiply(this, B))
throw new ArithmeticException("Matrix dimensions do not agree");
else if(this.rows() != C.rows() || B.cols() != C.cols())
throw new ArithmeticException("Target Matrix is no the correct size");
for (int i = 0; i < C.rows(); i++)
{
Vec Arowi = this.rows[i];
Vec Crowi = C.getRowView(i);
for(IndexValue iv : Arowi)
{
final int k = iv.getIndex();
double a = iv.getValue();
Vec Browk = B.getRowView(k);
Crowi.mutableAdd(a, Browk);
}
}
}
@Override
public void multiply(final Matrix B, Matrix C, ExecutorService threadPool)
{
if (!canMultiply(this, B))
throw new ArithmeticException("Matrix dimensions do not agree");
else if (this.rows() != C.rows() || B.cols() != C.cols())
throw new ArithmeticException("Target Matrix is no the correct size");
final CountDownLatch latch = new CountDownLatch(C.rows());
for (int i = 0; i < C.rows(); i++)
{
final Vec Arowi = this.rows[i];
final Vec Crowi = C.getRowView(i);
threadPool.submit(new Runnable()
{
@Override
public void run()
{
for (IndexValue iv : Arowi)
{
final int k = iv.getIndex();
double a = iv.getValue();
Vec Browk = B.getRowView(k);
Crowi.mutableAdd(a, Browk);
}
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(SparseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public void mutableMultiply(double c)
{
for(SparseVector row : rows)
row.mutableMultiply(c);
}
@Override
public void mutableMultiply(final double c, ExecutorService threadPool)
{
final CountDownLatch latch = new CountDownLatch(rows.length);
for(final SparseVector row : rows)
{
threadPool.submit(new Runnable()
{
@Override
public void run()
{
row.mutableMultiply(c);
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(SparseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
@Override
public Matrix[] lup()
{
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
@Override
public Matrix[] lup(ExecutorService threadPool)
{
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
@Override
public Matrix[] qr()
{
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
@Override
public Matrix[] qr(ExecutorService threadPool)
{
throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates.
}
@Override
public void mutableTranspose()
{
for(int i = 0; i < rows()-1; i++)
for(int j = i+1; j < cols(); j++)
{
double tmp = get(j, i);
set(j, i, get(i, j));
set(i, j, tmp);
}
}
@Override
public void transpose(Matrix C)
{
if(this.rows() != C.cols() || this.cols() != C.rows())
throw new ArithmeticException("Target matrix does not have the correct dimensions");
C.zeroOut();
for(int row = 0; row < rows.length; row++)
for(IndexValue iv : rows[row])
C.set(iv.getIndex(), row, iv.getValue());
}
@Override
public void transposeMultiply(Matrix B, Matrix C)
{
        if(this.rows() != B.rows())//Normally it is A_cols == B_rows, but we are doing A'*B, not A*B
throw new ArithmeticException("Matrix dimensions do not agree");
else if(this.cols() != C.rows() || B.cols() != C.cols())
throw new ArithmeticException("Destination matrix does not have matching dimensions");
final SparseMatrix A = this;
///Should choose step size such that 2*NB2^2 * dataTypeSize <= CacheSize
final int kLimit = this.rows();
for (int k = 0; k < kLimit; k++)
{
Vec bRow_k = B.getRowView(k);
Vec aRow_k = A.getRowView(k);
for (IndexValue iv : aRow_k)//iterating over "i"
{
Vec cRow_i = C.getRowView(iv.getIndex());
double a = iv.getValue();//A.get(k, i);
cRow_i.mutableAdd(a, bRow_k);
}
}
}
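    /* Why iterating over rows works here (a sketch): A'B = sum over k of
     * (row k of A)' (row k of B), i.e. a sum of outer products. For a sparse
     * row of A only its non-zero entries contribute, so each IndexValue just
     * adds a scaled copy of B's row k into the matching row of C.
     */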
@Override
public void transposeMultiply(final Matrix B, final Matrix C, ExecutorService threadPool)
{
        transposeMultiply(B, C);//TODO: use multiple threads
}
@Override
public void transposeMultiply(double c, Vec b, Vec x)
{
if(this.rows() != b.length())
throw new ArithmeticException("Matrix dimensions do not agree, [" + cols() +"," + rows() + "] x [" + b.length() + ",1]" );
else if(this.cols() != x.length())
throw new ArithmeticException("Matrix dimensions do not agree with target vector");
for(IndexValue b_iv : b)
x.mutableAdd(c*b_iv.getValue(), rows[b_iv.getIndex()]);
}
@Override
public Vec getRowView(int r)
{
return rows[r];
}
@Override
public double get(int i, int j)
{
return rows[i].get(j);
}
@Override
public void set(int i, int j, double value)
{
rows[i].set(j, value);
}
@Override
public void increment(int i, int j, double value)
{
rows[i].increment(j, value);
}
@Override
public int rows()
{
return rows.length;
}
@Override
public int cols()
{
return rows[0].length();
}
@Override
public boolean isSparce()
{
return true;
}
@Override
public void swapRows(int r1, int r2)
{
SparseVector tmp = rows[r2];
rows[r2] = rows[r1];
rows[r1] = tmp;
}
@Override
public void zeroOut()
{
for(Vec row : rows)
row.zeroOut();
}
@Override
public SparseMatrix clone()
{
return new SparseMatrix(this);
}
@Override
public long nnz()
{
int nnz = 0;
for(Vec v : rows)
nnz += v.nnz();
return nnz;
}
@Override
public void changeSize(int newRows, int newCols)
{
if(newRows <= 0)
throw new ArithmeticException("Matrix must have a positive number of rows");
if(newCols <= 0)
throw new ArithmeticException("Matrix must have a positive number of columns");
final int oldRows = rows.length;
if(newCols != cols())
{
for(int i = 0; i < rows.length; i++)
{
final SparseVector row_i = rows[i];
while(row_i.getLastNonZeroIndex() >= newCols)
row_i.set(row_i.getLastNonZeroIndex(), 0);
row_i.setLength(newCols);
}
}
//update new rows
rows = Arrays.copyOf(rows, newRows);
for(int i = oldRows; i < newRows; i++)
rows[i] = new SparseVector(newCols);
}
@Override
public void multiplyTranspose(Matrix B, Matrix C)
{
if(this.cols() != B.cols())
throw new ArithmeticException("Matrix dimensions do not agree");
else if (this.rows() != C.rows() || B.rows() != C.cols())
throw new ArithmeticException("Target Matrix is no the correct size");
for (int i = 0; i < this.rows(); i++)
{
final SparseVector A_i = this.rows[i];
for (int j = 0; j < B.rows(); j++)
{
final Vec B_j = B.getRowView(j);
double C_ij = 0;
                if(!B_j.isSparse())//B is dense, let's do this the easy way
{
for (IndexValue iv : A_i)
C_ij += iv.getValue() * B_j.get(iv.getIndex());
C.increment(i, j, C_ij);
continue;//Skip early, we did it!
}
//else, sparse
Iterator<IndexValue> A_iter = A_i.getNonZeroIterator();
Iterator<IndexValue> B_iter = B_j.getNonZeroIterator();
if(!B_iter.hasNext() || !A_iter.hasNext())//one is all zeros, nothing to do
continue;
IndexValue A_val = A_iter.next();
IndexValue B_val = B_iter.next();
while(A_val != null && B_val != null)//go add everything together!
{
if(A_val.getIndex() == B_val.getIndex())//inc and bump both
{
C_ij += A_val.getValue()*B_val.getValue();
if(A_iter.hasNext())
A_val = A_iter.next();
else
A_val = null;
if(B_iter.hasNext())
B_val = B_iter.next();
else
B_val = null;
}
else if(A_val.getIndex() < B_val.getIndex())//A is behind, bump it
{
if(A_iter.hasNext())
A_val = A_iter.next();
else
A_val = null;
}
else//B is behind, bump it
{
if(B_iter.hasNext())
B_val = B_iter.next();
else
B_val = null;
}
}
C.increment(i, j, C_ij);
}
}
}
@Override
public void multiplyTranspose(final Matrix B, final Matrix C, ExecutorService threadPool)
{
if(this.cols() != B.cols())
throw new ArithmeticException("Matrix dimensions do not agree");
else if (this.rows() != C.rows() || B.rows() != C.cols())
throw new ArithmeticException("Target Matrix is no the correct size");
final SparseMatrix A = this;
final CountDownLatch latch = new CountDownLatch(SystemInfo.LogicalCores);
for(int id = 0; id < SystemInfo.LogicalCores; id++)
{
final int ID = id;
threadPool.submit(new Runnable()
{
@Override
public void run()
{
try{
for (int i = ID; i < A.rows(); i += SystemInfo.LogicalCores)
{
final SparseVector A_i = A.rows[i];
for (int j = 0; j < B.rows(); j++)
{
final Vec B_j = B.getRowView(j);
double C_ij = 0;
                            if(!B_j.isSparse())//B is dense, let's do this the easy way
{
for (IndexValue iv : A_i)
C_ij += iv.getValue() * B_j.get(iv.getIndex());
C.increment(i, j, C_ij);
continue;//Skip early, we did it!
}
//else, sparse
Iterator<IndexValue> A_iter = A_i.getNonZeroIterator();
Iterator<IndexValue> B_iter = B_j.getNonZeroIterator();
if(!B_iter.hasNext() || !A_iter.hasNext())//one is all zeros, nothing to do
continue;
IndexValue A_val = A_iter.next();
IndexValue B_val = B_iter.next();
while(A_val != null && B_val != null)//go add everything together!
{
if(A_val.getIndex() == B_val.getIndex())//inc and bump both
{
C_ij += A_val.getValue()*B_val.getValue();
if(A_iter.hasNext())
A_val = A_iter.next();
else
A_val = null;
if(B_iter.hasNext())
B_val = B_iter.next();
else
B_val = null;
}
else if(A_val.getIndex() < B_val.getIndex())//A is behind, bump it
{
if(A_iter.hasNext())
A_val = A_iter.next();
else
A_val = null;
}
else//B is behind, bump it
{
if(B_iter.hasNext())
B_val = B_iter.next();
else
B_val = null;
}
}
C.increment(i, j, C_ij);
}
}
}
catch(Exception ex)
{
ex.printStackTrace();
}
System.out.println(ID + " fin");
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(SparseMatrix.class.getName()).log(Level.SEVERE, null, ex);
}
}
}
| 20,011 | 31.43436 | 168 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/SparseVector.java |
package jsat.linear;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import static java.lang.Math.*;
import java.util.*;
import jsat.math.Function1D;
import jsat.math.IndexFunction;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
/**
* Provides a vector implementation that is sparse. It does not allocate space
* for a vector of the specified size, and only stores non zero values. All
* values not stored are implicitly zero.
* <br>
* Operations that change several zero values in a sparse vector to non-zero
* values may have degraded performance.
* <br>
 * A sparse vector should never be used unless at least half the values are zero.
 * If more than half the values are non-zero, it will use more memory than an
* equivalent {@link DenseVector}. The more values that are zero in the vector,
* the better its performance will be.
*
* @author Edward Raff
*/
public class SparseVector extends Vec
{
private static final long serialVersionUID = 8591745505666264662L;
/**
* Length of the vector
*/
private int length;
/**
* number of indices used in this vector
*/
protected int used;
/**
* The mapping to true index values
*/
protected int[] indexes;
/**
* The Corresponding values for each index
*/
protected double[] values;
/**
* Creates a new sparse vector of the given length that is all zero values.
*
* @param length the length of the sparse vector
*/
public SparseVector(int length)
{
this(length, 10);
}
/**
* Creates a new sparse vector of the same length as {@code vals} and sets
* each value to the values in the list.
*
* @param vals the list of values to create a vector from
*/
public SparseVector(List<Double> vals)
{
this(vals.size());
int z = 0;
for(int i = 0; i < vals.size(); i++)
if(vals.get(i) != 0)
{
if(z >= indexes.length)
{
indexes = Arrays.copyOf(indexes, indexes.length*3/2);
values = Arrays.copyOf(values, values.length*3/2);
}
indexes[z] = i;
values[z++] = vals.get(i);
}
        used = z;
    }
/**
* Creates a new sparse vector of the specified length, and pre-allocates
* enough internal state to hold {@code capacity} non zero values. The
* vector itself will start out with all zero values.
*
* @param length the length of the sparse vector
* @param capacity the number of non zero values to allocate space for
*/
public SparseVector(int length, int capacity)
{
this(new int[capacity], new double[capacity], length, 0);
}
/**
* Creates a new sparse vector backed by the given arrays. Modifying the
* arrays will modify the vector, and no validation will be done. This
* constructor should only be used in performance necessary scenarios<br>
* To make sure the input values are valid, the {@code indexes } values must
* be increasing and all values less than {@code length} and greater than
* {@code -1} up to the first {@code used} indices.<br>
* All the values stored in {@code values} must be non zero and can not be a
* special value. <br>
* {@code used} must be greater than -1 and less than the length of the
* {@code indexes} and {@code values} arrays. <br>
* The {@code indexes} and {@code values} arrays must be the exact same
* length
*
* @param indexes the array to store the index locations in
* @param values the array to store the index values in
* @param length the length of the sparse vector
* @param used the number of non zero values in the vector taken from the
* given input arrays.
*/
public SparseVector(int[] indexes, double[] values, int length, int used)
{
if(values.length != indexes.length)
throw new IllegalArgumentException("Index and Value arrays must have the same length, instead index was " + indexes.length + " and values was " + values.length);
if(used < 0 || used > length || used > values.length)
throw new IllegalArgumentException("Bad used value. Used must be in the range of 0 and min of values length (" + values.length + ") and array length (" + length + "), instead was given " + used);
if(length <= 0)
throw new IllegalArgumentException("Length of sparse vector must be positive, not " + length);
this.used = used;
this.length = length;
this.indexes = indexes;
this.values = values;
}
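    /* Construction sketch (hypothetical values, not part of the original source):
     * the vector [0, 2.0, 0, -1.5, 0] built with no validation overhead.
     *
     * int[] idx = {1, 3}; // must be strictly increasing
     * double[] vals = {2.0, -1.5}; // must all be non-zero
     * SparseVector v = new SparseVector(idx, vals, 5, 2);
     */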
/**
* Creates a new sparse vector by copying the values from another
* @param toCopy the vector to copy the values of
*/
public SparseVector(Vec toCopy)
{
this(toCopy.length(), toCopy.nnz());
for(IndexValue iv : toCopy)
{
indexes[used] = iv.getIndex();
values[used++] = iv.getValue();
}
}
@Override
public int length()
{
return length;
}
/**
     * Because sparse vectors do not have most values set, they can
* have their length increased, and sometimes decreased, without
* any effort. The length can always be extended. The length can
* be reduced down to the size of the largest non zero element.
*
* @param length the new length of this vector
*/
@Override
public void setLength(int length)
{
if(used > 0 && length < indexes[used-1])
throw new RuntimeException("Can not set the length to a value less then an index already in use");
this.length = length;
}
@Override
public int nnz()
{
return used;
}
/**
     * Removes a non zero value by shifting everything after it to the left by one
* @param nzIndex the index to remove (setting it to zero)
*/
private void removeNonZero(int nzIndex)
{
for(int i = nzIndex+1; i < used; i++)
{
values[i-1] = values[i];
indexes[i-1] = indexes[i];
}
used--;
}
/**
* Increments the value at the given index by the given value.
* @param index the index of the value to alter
* @param val the value to be added to the index
*/
@Override
public void increment(int index, double val)
{
if (index > length - 1 || index < 0)
throw new IndexOutOfBoundsException("Can not access an index larger then the vector or a negative index");
        if(val == 0)//don't want to insert a zero, and a zero changes nothing
return;
int location = Arrays.binarySearch(indexes, 0, used, index);
if(location < 0)
insertValue(location, index, val);
else
{
values[location]+=val;
if(values[location] == 0.0)
removeNonZero(location);
}
}
@Override
public double get(int index)
{
if (index > length - 1 || index < 0)
throw new ArithmeticException("Can not access an index larger then the vector or a negative index");
int location = Arrays.binarySearch(indexes, 0, used, index);
if (location < 0)
return 0.0;
else
return values[location];
}
@Override
public void set(int index, double val)
{
if(index > length()-1 || index < 0)
throw new IndexOutOfBoundsException(index + " does not fit in [0," + length + ")");
if( used > 0 && index > indexes[used-1])//fast path, just stick it on the end
{
            insertValue(-used-1, index, val);//insert at used, but modify to match what Arrays.binarySearch would have returned, b/c that's what the function expects
return;
}
int insertLocation = Arrays.binarySearch(indexes, 0, used, index);
if(insertLocation >= 0)
{
if(val != 0)//set it
values[insertLocation] = val;
else//shift used count and everyone over
{
removeNonZero(insertLocation);
}
}
        else if(val != 0)//don't insert 0s, that is stupid
insertValue(insertLocation, index, val);
}
/**
* Takes the negative insert location value returned by {@link Arrays#binarySearch(int[], int, int, int) }
* and adjust the vector to add the given value into this location. Should only be called with negative
* input returned by said method. Should never be called for an index that in fact does already exist
     * in this sparse vector.
*
* @param insertLocation the negative insertion index such that -(insertLocation+1) is the address that the value should have
* @param index the index that is being added
* @param val the value that is being added for the given index
*/
private void insertValue(int insertLocation, int index, double val)
{
        insertLocation = -(insertLocation+1);//Convert from negative value to the location it should be placed, see JavaDoc of binarySearch
if(used == indexes.length)//Full, expand
{
int newIndexesSize = Math.max(Math.min(indexes.length*2, Integer.MAX_VALUE), 8);
indexes = Arrays.copyOf(indexes, newIndexesSize);
values = Arrays.copyOf(values, newIndexesSize);
}
        if(insertLocation < used)//Instead of moving indexes over manually, set it up to use a native System call to move things out of the way
{
System.arraycopy(indexes, insertLocation, indexes, insertLocation+1, used-insertLocation);
System.arraycopy(values, insertLocation, values, insertLocation+1, used-insertLocation);
}
indexes[insertLocation] = index;
values[insertLocation] = val;
used++;
}
@Override
public Vec sortedCopy()
{
IndexTable it = new IndexTable(DoubleList.unmodifiableView(values, used));
double[] newValues = new double[used];
        int[] newIndices = new int[used];
int lessThanZero = 0;
for(int i = 0; i < used; i++)
{
int origIndex = it.index(i);
newValues[i] = values[origIndex];
if(newValues[i] < 0)
lessThanZero++;
            newIndices[i] = i;
}
//all < 0 values are right, now correct > 0 values
for(int i = lessThanZero; i < used; i++)
            newIndices[i] = length-(used-lessThanZero)+(i-lessThanZero);
SparseVector sv = new SparseVector(length);
sv.used = this.used;
sv.values = newValues;
        sv.indexes = newIndices;
return sv;
}
/**
* Returns the index of the last non-zero value, or -1 if all values are zero.
* @return the index of the last non-zero value, or -1 if all values are zero.
*/
public int getLastNonZeroIndex()
{
if(used == 0)
return -1;
return indexes[used-1];
}
@Override
public double min()
{
double result = 0;
for(int i = 0; i < used; i++)
result = Math.min(result, values[i]);
return result;
}
@Override
public double max()
{
double result = 0;
for(int i = 0; i < used; i++)
result = Math.max(result, values[i]);
return result;
}
@Override
public double sum()
{
/*
         * Uses Kahan summation algorithm, which is more accurate than
         * naively summing the values in floating point. Though it
         * does not guarantee the best possible accuracy
*
* See: http://en.wikipedia.org/wiki/Kahan_summation_algorithm
*/
double sum = 0;
double c = 0;
for(int i = 0; i < used; i++)
{
double d = values[i];
double y = d - c;
double t = sum+y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
@Override
public double variance()
{
double mu = mean();
double tmp = 0;
double N = length();
for(int i = 0; i < used; i++)
tmp += Math.pow(values[i]-mu, 2);
//Now add all the zeros into it
tmp += (length()-used) * Math.pow(0-mu, 2);
tmp /= N;
return tmp;
}
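    /* The implicit zeros are folded in analytically above (a sketch):
     * var = ( sum over non-zeros of (x_i - mu)^2 + (n - nnz) * mu^2 ) / n
     * so the cost is O(nnz) rather than O(n).
     */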
@Override
public double median()
{
if(used < length/2)//more than half zeros, so 0 must be the median
return 0.0;
else
return super.median();
}
@Override
public double skewness()
{
double mean = mean();
double numer = 0, denom = 0;
for(int i = 0; i < used; i++)
{
numer += pow(values[i]-mean, 3);
denom += pow(values[i]-mean, 2);
}
        //All the zeros we aren't storing
numer += pow(-mean, 3)*(length-used);
denom += pow(-mean, 2)*(length-used);
numer /= length;
denom /= length;
double s1 = numer / (pow(denom, 3.0/2.0) );
if(length >= 3)//We can use the bias corrected formula
return sqrt(length*(length-1))/(length-2)*s1;
return s1;
}
@Override
public double kurtosis()
{
double mean = mean();
double tmp = 0;
double var = 0;
for(int i = 0; i < used; i++)
{
tmp += pow(values[i]-mean, 4);
var += pow(values[i]-mean, 2);
}
        //All the zeros we aren't storing
tmp += pow(-mean, 4)*(length-used);
var += pow(-mean, 2)*(length-used);
tmp /= length;
var /= length;
return tmp / pow(var, 2) - 3;
}
@Override
public void copyTo(Vec destination)
{
if(destination instanceof SparseVector)
{
SparseVector other = (SparseVector) destination;
if(other.indexes.length < this.used)
{
other.indexes = Arrays.copyOf(this.indexes, this.used);
other.values = Arrays.copyOf(this.values, this.used);
other.used = this.used;
}
else
{
other.used = this.used;
System.arraycopy(this.indexes, 0, other.indexes, 0, this.used);
System.arraycopy(this.values, 0, other.values, 0, this.used);
}
}
else
super.copyTo(destination);
}
@Override
public double dot(Vec v)
{
double dot = 0;
if(v instanceof SparseVector)
{
SparseVector b = (SparseVector) v;
int p1 = 0, p2 = 0;
while (p1 < used && p2 < b.used)
{
int a1 = indexes[p1], a2 = b.indexes[p2];
if (a1 == a2)
dot += values[p1++] * b.values[p2++];
else if (a1 > a2)
p2++;
else
p1++;
}
}
else if(v.isSparse())
return super.dot(v);
else// it is dense
for (int i = 0; i < used; i++)
dot += values[i] * v.get(indexes[i]);
return dot;
}
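    /* The sparse-sparse branch above is a classic two-pointer merge over the
     * two sorted index arrays: advance whichever pointer is behind, and
     * multiply on an index match. The cost is O(nnz(a) + nnz(b)) rather than
     * O(n).
     */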
@Override
public String toString()
{
StringBuilder sb = new StringBuilder("[");
int p = 0;
for(int i = 0; i < length(); i++)
{
if(i != 0)
sb.append(", ");
if(p < used && indexes[p] == i)
sb.append(values[p++]);
else
sb.append("0.0");
}
sb.append("]");
return sb.toString();
}
@Override
public void multiply(double c, Matrix A, Vec b)
{
if(this.length() != A.rows())
throw new ArithmeticException("Vector x Matrix dimensions do not agree");
else if(b.length() != A.cols())
throw new ArithmeticException("Destination vector is not the right size");
for(int i = 0; i < used; i++)
{
double val = c*this.values[i];
int index = this.indexes[i];
for(int j = 0; j < A.cols(); j++)
b.increment(j, val*A.get(index, j));
}
}
@Override
public void mutableAdd(double c)
{
if(c == 0.0)
return;
        /* This is NOT the most efficient way to implement this.
         * But adding a constant to every value in a sparse
         * vector defeats its purpose.
         */
for(int i = 0; i < length(); i++)
this.set(i, get(i) + c);
}
@Override
public void mutableAdd(double c, Vec v)
{
if(c == 0.0)
return;
if(v instanceof SparseVector)
{
SparseVector b = (SparseVector) v;
int p1 = 0, p2 = 0;
while (p1 < used && p2 < b.used)
{
int a1 = indexes[p1], a2 = b.indexes[p2];
if (a1 == a2)
{
values[p1] += c*b.values[p2];
p1++;
p2++;
}
else if (a1 > a2)
{
//0 + some value is that value, set it
this.set(a2, c*b.values[p2]);
/*
                     * p2 must be incremented because we're moving to the next value
                     *
                     * p1 must be incremented because a2 was less than the current index.
                     * So the insertion occurred before p1, so for indexes[p1] to == a1,
* p1 must be incremented
*
*/
p1++;
p2++;
}
else//a1 < a2, thats adding 0 to this vector, nothing to do.
{
p1++;
}
}
//One of them is now empty.
//If b is not empty, we must add b to this. If b is empty, we would be adding zeros to this [so we do nothing]
while(p2 < b.used)
            this.set(b.indexes[p2], c*b.values[p2++]);//TODO Can be done more efficiently
}
else if(v.isSparse())
{
if(v.nnz() == 0)
return;
int p1 = 0;
Iterator<IndexValue> iter = v.getNonZeroIterator();
IndexValue iv = iter.next();
while(p1 < used && iv != null)
{
int a1 = indexes[p1];
int a2 = iv.getIndex();
if(a1 == a2)
{
values[p1++] += c*iv.getValue();
if(iter.hasNext())
iv = iter.next();
else
break;
}
else if(a1 > a2)
{
this.set(a2, c*iv.getValue());
p1++;
if(iter.hasNext())
iv = iter.next();
else
break;
}
else
p1++;
}
}
else
{
//Else it is dense
for(int i = 0; i < length(); i++)
this.set(i, this.get(i) + c*v.get(i));
}
}
@Override
public void mutableMultiply(double c)
{
if(c == 0.0)
{
zeroOut();
return;
}
for(int i = 0; i < used; i++)
values[i] *= c;
}
@Override
public void mutableDivide(double c)
{
if(c == 0 && used != length)
throw new ArithmeticException("Division by zero would occur");
for(int i = 0; i < used; i++)
values[i] /= c;
}
@Override
public double pNormDist(double p, Vec y)
{
if(this.length() != y.length())
throw new ArithmeticException("Vectors must be of the same length");
double norm = 0;
if (y instanceof SparseVector)
{
int p1 = 0, p2 = 0;
SparseVector b = (SparseVector) y;
while (p1 < this.used && p2 < b.used)
{
int a1 = indexes[p1], a2 = b.indexes[p2];
if (a1 == a2)
{
norm += Math.pow(Math.abs(this.values[p1] - b.values[p2]), p);
p1++;
p2++;
}
else if (a1 > a2)
norm += Math.pow(Math.abs(b.values[p2++]), p);
else//a1 < a2, this vec has a value, other does not
norm += Math.pow(Math.abs(this.values[p1++]), p);
}
//One of them is now empty.
//So just sum up the rest of the elements
while(p1 < this.used)
norm += Math.pow(Math.abs(this.values[p1++]), p);
while(p2 < b.used)
norm += Math.pow(Math.abs(b.values[p2++]), p);
}
else
{
int z = 0;
for (int i = 0; i < length(); i++)
{
//Move through until we hit our next non zero element
while (z < used && indexes[z] > i)
norm += Math.pow(Math.abs(-y.get(i++)), p);
//We made it! (or are at the end). Is our non zero value the same?
if (z < used && indexes[z] == i)
norm += Math.pow(Math.abs(values[z++] - y.get(i)), p);
else//either we used a non zero of this in the loop or we are out of them
norm += Math.pow(Math.abs(-y.get(i)), p);
}
}
return Math.pow(norm, 1.0/p);
}
@Override
public double pNorm(double p)
{
if (p <= 0)
throw new IllegalArgumentException("norm must be a positive value, not " + p);
double result = 0;
if (p == 1)
{
for (int i = 0; i < used; i++)
result += abs(values[i]);
}
else if (p == 2)
{
for (int i = 0; i < used; i++)
result += values[i] * values[i];
result = Math.sqrt(result);
}
else if (Double.isInfinite(p))
{
for (int i = 0; i < used; i++)
result = Math.max(result, abs(values[i]));
}
else
{
for (int i = 0; i < used; i++)
result += Math.pow(Math.abs(values[i]), p);
result = pow(result, 1 / p);
}
return result;
}
@Override
public SparseVector clone()
{
SparseVector copy = new SparseVector(length, Math.max(used, 10));
System.arraycopy(this.values, 0, copy.values, 0, this.used);
System.arraycopy(this.indexes, 0, copy.indexes, 0, this.used);
copy.used = this.used;
return copy;
}
@Override
public void normalize()
{
double sum = 0;
for(int i = 0; i < used; i++)
sum += values[i]*values[i];
sum = Math.sqrt(sum);
mutableDivide(Math.max(sum, 1e-10));
}
@Override
public void mutablePairwiseMultiply(Vec b)
{
if(this.length() != b.length())
throw new ArithmeticException("Vectors must have the same length");
for(int i = 0; i < used; i++)
values[i] *= b.get(indexes[i]);//zeros stay zero
}
@Override
public void mutablePairwiseDivide(Vec b)
{
if(this.length() != b.length())
throw new ArithmeticException("Vectors must have the same length");
for (int i = 0; i < used; i++)
values[i] /= b.get(indexes[i]);//zeros stay zero
}
@Override
public boolean equals(Object obj, double range)
{
if(!(obj instanceof Vec))
return false;
Vec otherVec = (Vec) obj;
range = Math.abs(range);
if(this.length() != otherVec.length())
return false;
int z = 0;
for (int i = 0; i < length(); i++)
{
            //Move through until we hit our next non-zero element, comparing the other vec to zero
while (z < used && indexes[z] > i)
if (Math.abs(otherVec.get(i++)) > range)//We are zero!
return false;
//We made it! (or are at the end). Is our non zero value the same?
            if (z < used && indexes[z] == i)
            {
                double val = values[z++];//read the non-zero once and advance
                if (Math.abs(val - otherVec.get(i)) > range
                        && !(Double.isNaN(val) && Double.isNaN(otherVec.get(i))))//NaN != NaN is always true, so check that special case
                    return false;
            }
}
return true;
}
@Override
public double[] arrayCopy()
{
double[] array = new double[length()];
for(int i = 0; i < used; i++)
array[indexes[i]] = values[i];
return array;
}
@Override
public void applyFunction(Function1D f)
{
if(f.f(0.0) != 0.0)
super.applyFunction(f);
else//Then we only need to apply it to the non zero values!
{
for(int i = 0; i < used; i++)
values[i] = f.f(values[i]);
}
}
@Override
public void applyIndexFunction(IndexFunction f)
{
if(f.f(0.0, -1) != 0.0)
super.applyIndexFunction(f);
else//Then we only need to apply it to the non zero values!
{
/*
* The indexFunction may turn a value to zero, if so, we need to
* shift everything over and skip based on how many zeros have been
* created
*/
int skip = 0;
for(int i = 0; i < used; i++)
{
indexes[i-skip] = indexes[i];
values[i-skip] = f.indexFunc(values[i], i);
if(values[i-skip] == 0.0)
skip++;
}
used -= skip;
}
}
@Override
public void zeroOut()
{
this.used = 0;
}
@Override
public Iterator<IndexValue> getNonZeroIterator(final int start)
{
if(used <= 0)
return Collections.EMPTY_LIST.iterator();
final int startPos;
if(start <= indexes[0])
startPos = 0;
else
{
int tmpIndx = Arrays.binarySearch(indexes, 0, used, start);
if(tmpIndx >= 0)
startPos = tmpIndx;
else
startPos = -(tmpIndx)-1;
}
Iterator<IndexValue> itor = new Iterator<IndexValue>()
{
int curUsedPos = startPos;
IndexValue indexValue = new IndexValue(-1, Double.NaN);
@Override
public boolean hasNext()
{
return curUsedPos < used;
}
@Override
public IndexValue next()
{
indexValue.setIndex(indexes[curUsedPos]);
indexValue.setValue(values[curUsedPos++]);
return indexValue;
}
@Override
public void remove()
{
throw new UnsupportedOperationException("Not supported yet.");
}
};
return itor;
}
@Override
public int hashCode()
{
int result = 1;
for (int i = 0; i < used; i++)
{
long bits = Double.doubleToLongBits(values[i]);
result = 31 * result + (int)(bits ^ (bits >>> 32));
result = 31 * result + indexes[i];
}
return 31* result + length;
}
@Override
public boolean isSparse()
{
return true;
}
private void readObject(ObjectInputStream in) throws ClassNotFoundException, IOException
{
this.length = in.readInt();
this.used = in.readInt();
this.indexes = new int[this.used];
this.values = new double[this.used];
for(int i = 0; i < this.used; i++)
{
indexes[i] = in.readInt();
values[i] = in.readDouble();
}
}
private void writeObject(ObjectOutputStream out) throws IOException
{
out.writeInt(length);
out.writeInt(used);
for(int i = 0; i < used; i++)
{
out.writeInt(indexes[i]);
out.writeDouble(values[i]);
}
}
}
| 28,958 | 29.19708 | 207 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/SubMatrix.java |
package jsat.linear;
/**
* This class allows for the selection of an area of a matrix to operate on independently.
 * Mutable matrix operations performed on this sub matrix will be visible in the base matrix
 * given. This allows for easy implementation of blocking algorithms, but care must be taken
* to not create and use two sub matrices that overlap and are altered simultaneously.
*
* @author Edward Raff
*/
public class SubMatrix extends GenericMatrix
{
private static final long serialVersionUID = 8842973175562587725L;
private Matrix baseMatrix;
private int firstRow, firstColumn, toRow, toCol;
/**
* Creates a new matrix that is a sub view of the given base matrix.
* @param baseMatrix the base matrix to get a view of
* @param firstRow the first row to consider as part of the sub matrix, starting from 0 inclusive
* @param firstColumn the first column to consider as part of the sub matrix, starting from 0 inclusive
* @param toRow the last row from the base matrix to consider, exclusive
* @param toCol the last column from the base matrix to consider, exclusive
*/
public SubMatrix(Matrix baseMatrix, int firstRow, int firstColumn, int toRow, int toCol)
{
this.baseMatrix = baseMatrix;
if(firstColumn < 0 || firstRow < 0 || toRow < 0 || toCol < 0)
throw new ArithmeticException("Can not give negative row or column counts");
else if(toRow == 0 || toCol == 0)
throw new ArithmeticException("Must give a positive number of rows and columns");
else if(toRow > baseMatrix.rows() || toCol > baseMatrix.cols())
throw new ArithmeticException("You can not specify a matrix that goes past the row / column boundry of the base matrix");
else if(firstRow >= toRow || firstColumn >= toCol)
throw new ArithmeticException("Illogical bounds given");
this.firstRow = firstRow;
this.firstColumn = firstColumn;
this.toRow = toRow;
this.toCol = toCol;
        //If we are given a SubMatrix, let's adjust to use it directly instead of accessing through layers
        if(baseMatrix instanceof SubMatrix)
        {
            SubMatrix given = (SubMatrix) baseMatrix;
            this.baseMatrix = given.baseMatrix;
            this.firstRow += given.firstRow;
            this.firstColumn += given.firstColumn;
            this.toRow += given.firstRow;//the exclusive bounds shift by the same offsets as the starts
            this.toCol += given.firstColumn;
}
}
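    /*
     * Illustrative usage sketch (not part of the original source): mutations
     * made through the view write through to the base matrix.
     *
     *   Matrix A = new DenseMatrix(4, 4);
     *   Matrix block = new SubMatrix(A, 1, 1, 3, 3);//the inner 2 x 2 block of A
     *   block.set(0, 0, 5.0);//equivalent to A.set(1, 1, 5.0)
     */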
/**
* Returns the matrix that is the base for this sub matrix.
* @return the matrix that is the base for this sub matrix.
*/
public Matrix getBaseMatrix()
{
return baseMatrix;
}
/**
* Returns the row offset used from the base matrix
* @return the row offset used from the base matrix
*/
public int getFirstRow()
{
return firstRow;
}
/**
* Returns the column offset used from the base matrix
* @return the column offset used from the base matrix
*/
public int getFirstColumn()
{
return firstColumn;
}
@Override
protected Matrix getMatrixOfSameType(int rows, int cols)
{
return new DenseMatrix(rows, cols);//Ehhh... all well
}
@Override
public double get(int i, int j)
{
//We MUST do a bounds check, as they might go past us but an index that does exist in the base
if(i >= rows() || j >= cols())
throw new ArrayIndexOutOfBoundsException("Can not access index [" + i + ", " + j + "] in the matrix of dimension [" + rows() + ", " + cols() + "]");
return baseMatrix.get(i+firstRow, j+firstColumn);
}
@Override
public void set(int i, int j, double value)
{
//We MUST do a bounds check, as they might go past us but an index that does exist in the base
if(i >= rows() || j >= cols())
throw new ArrayIndexOutOfBoundsException("Can not access index [" + i + ", " + j + "] in the matrix of dimension [" + rows() + ", " + cols() + "]");
baseMatrix.set(i+firstRow, j+firstColumn, value);
}
@Override
public Vec getRowView(int r)
{
if(r >= rows())
throw new IndexOutOfBoundsException("Can not access row " + r + " of " + rows() +" by " + cols() + " matrix");
        Vec origVec = baseMatrix.getRowView(r+firstRow);//offset into the base matrix, not away from it
return new SubVector(firstColumn, toCol-firstColumn, origVec);
}
@Override
public Vec getColumnView(int j)
{
if(j >= cols())
throw new IndexOutOfBoundsException("Can not access column " + j + " of " + rows() +" by " + cols() + " matrix");
        Vec origVec = baseMatrix.getColumnView(j+firstColumn);//offset into the base matrix, not away from it
return new SubVector(firstRow, toRow-firstRow, origVec);
}
@Override
public int rows()
{
return toRow-firstRow;
}
@Override
public int cols()
{
return toCol-firstColumn;
}
@Override
public boolean isSparce()
{
return baseMatrix.isSparce();
}
/**
* This method alters the size of a matrix, either adding or subtracting
* rows from the internal structure of the matrix. Every resize call may
* cause a new allocation internally, and should not be called for excessive
* changing of a matrix. All added rows/ columns will have values of zero.
* If a row / column is removed, it is always the bottom/right most row /
* column removed. Values of the removed rows / columns will be lost.<br>
* <br>
* When a SubMatrix attempts to change size, it may alter the values of the
* underlying matrix in an unusual way. If the base matrix is large enough
* to hold the changed size, then the values in the underlying matrix that
* this SubMatrix is expanding to encompass will be set to zero. If the
* underlying matrix is not large enough, change size will be called upon it
* as well.<br>
* When decreasing the number of rows / columns, the SubMatrix will always
     * reduce its view - but never shrink the underlying matrix that the view is
* of.
* <br>
* To expand the size of a SubMatrix without zeroing out the new values,
* create a new SubMatrix object.
*
* @param newRows the new number of rows, must be positive
* @param newCols the new number of columns, must be positive.
*/
@Override
public void changeSize(int newRows, int newCols)
{
if(newRows <= 0)
throw new ArithmeticException("Matrix must have a positive number of rows");
if(newCols <= 0)
throw new ArithmeticException("Matrix must have a positive number of columns");
//Increase the underlying matrix to the needed size
int underNewRows = Math.max(newRows+firstRow, baseMatrix.rows());
int underNewCols = Math.max(newCols+firstColumn, baseMatrix.cols());
/*
* Either the size for the base stays the same, or gets increased
*/
baseMatrix.changeSize(underNewRows, underNewCols);
//Zero out the values we are expanding to
if(newRows > rows())
new SubMatrix(baseMatrix, toRow, firstColumn, firstRow+newRows, firstColumn+newCols).zeroOut();
if(newCols > cols())
new SubMatrix(baseMatrix, firstRow, toCol, firstRow+newRows, firstColumn+newCols).zeroOut();
toRow = firstRow+newRows;
toCol = firstColumn+newCols;
}
}
| 7,520 | 37.968912 | 160 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/SubVector.java |
package jsat.linear;
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
* SubVector takes an already existing vector and creates a new one that is a
 * subset of and backed by the original one. Altering the sub vector will affect
 * the original and vice versa.
*
* @author Edward Raff
*/
public class SubVector extends Vec
{
private static final long serialVersionUID = -873882618035700676L;
private int startPosition;
private int length;
private Vec vec;
/**
* Creates a new sub vector of the input vector
*
     * @param startPosition the starting index (inclusive) of the original
* vector
* @param length the length of the new sub vector
* @param vec the original vector to back this sub vector.
*/
public SubVector(int startPosition, int length, Vec vec)
{
if(startPosition < 0 || startPosition >= vec.length())
throw new IndexOutOfBoundsException("Start position out of bounds for input vector");
else if(length+startPosition > vec.length())
throw new IndexOutOfBoundsException("Length too long for start position for the given vector");
this.startPosition = startPosition;
this.length = length;
this.vec = vec;
}
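    /*
     * Illustrative usage sketch (not part of the original source):
     *
     *   Vec base = new DenseVector(new double[]{1, 2, 3, 4, 5});
     *   Vec window = new SubVector(1, 3, base);//views [2, 3, 4]
     *   window.set(0, -2.0);//base is now [1, -2, 3, 4, 5]
     */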
@Override
public int length()
{
return length;
}
@Override
public double get(int index)
{
if(index >= length)
throw new IndexOutOfBoundsException("Index of " + index + " can not be accessed for length of " + length);
return vec.get(startPosition+index);
}
@Override
public void set(int index, double val)
{
if(index >= length)
throw new IndexOutOfBoundsException("Index of " + index + " can not be accessed for length of " + length);
vec.set(startPosition+index, val);
}
@Override
public boolean isSparse()
{
return vec.isSparse();
}
@Override
public Iterator<IndexValue> getNonZeroIterator(int start)
{
final Iterator<IndexValue> origIter = vec.getNonZeroIterator(startPosition+start);
Iterator<IndexValue> newIter = new Iterator<IndexValue>()
{
IndexValue nextVal = origIter.hasNext() ? origIter.next() : new IndexValue(Integer.MAX_VALUE, Double.NaN);
IndexValue curVal = new IndexValue(-1, Double.NaN);
@Override
public boolean hasNext()
{
return nextVal.getIndex() < length+startPosition;
}
@Override
public IndexValue next()
{
if(!hasNext())
throw new NoSuchElementException();
curVal.setIndex(nextVal.getIndex()-startPosition);
curVal.setValue(nextVal.getValue());
if(origIter.hasNext())
nextVal = origIter.next();
else
nextVal.setIndex(Integer.MAX_VALUE);
return curVal;
}
@Override
public void remove()
{
throw new UnsupportedOperationException("Not supported yet.");
}
};
return newIter;
}
@Override
public Vec clone()
{
if(vec.isSparse())
return new SparseVector(this);
else
return new DenseVector(this);
}
@Override
public void setLength(int length)
{
        throw new UnsupportedOperationException("Not supported yet.");
}
}
| 3,653 | 28 | 135 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/TransposeView.java |
package jsat.linear;
/**
* This class provides a free view of the transpose of a matrix. This is done by accessing
* the matrix elements in swapped order. This has a serious performance impact. If the base
* matrix storage is row major order, then accessing the TransposeView in column major order
* will provide the best performance.
*
* @author Edward Raff
*/
public class TransposeView extends GenericMatrix
{
private static final long serialVersionUID = 7762422292840392481L;
private Matrix base;
public TransposeView(Matrix base)
{
this.base = base;
}
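    /*
     * Illustrative usage sketch (not part of the original source): no data is
     * copied, indices are simply swapped on access.
     *
     *   Matrix A = new DenseMatrix(2, 3);
     *   Matrix At = new TransposeView(A);//a 3 x 2 view
     *   At.set(2, 0, 7.0);//writes the same storage as A.set(0, 2, 7.0)
     */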
@Override
public Vec getColumnView(int j)
{
return base.getRowView(j);
}
@Override
public Vec getRowView(int r)
{
return base.getColumnView(r);
}
@Override
protected Matrix getMatrixOfSameType(int rows, int cols)
{
return new DenseMatrix(rows, cols);
}
@Override
public double get(int i, int j)
{
return base.get(j, i);
}
@Override
public void set(int i, int j, double value)
{
base.set(j, i, value);
}
@Override
public int rows()
{
return base.cols();
}
@Override
public int cols()
{
return base.rows();
}
@Override
public boolean isSparce()
{
return base.isSparce();
}
@Override
public void changeSize(int newRows, int newCols)
{
base.changeSize(newCols, newRows);
}
}
| 1,500 | 18.24359 | 93 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/TruncatedSVD.java | /*
* This implementation contributed under the Public Domain.
*/
package jsat.linear;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static java.lang.Math.nextUp;
import java.util.Arrays;
/**
* Computes the Truncated version of the Singular Value Decomposition (SVD).
* Given a rectangular matrix <b>A</b><sub>m,n </sub>, and a desired number of
* singular values <i>k</i>, the Truncated SVD computes <b>A</b><sub>m,n </sub>
* ≈ <b>U</b><sub>m,k </sub> <b>Σ</b><sub>k,k </sub>
* <b>V</b><sup>T</sup><sub>k,n </sub>. This is faster than computing the full
* SVD using {@link SingularValueDecomposition}, as only the top-<i>k</i>
* singular values and associated data will be computed. This implementation
* also
* supports sparse inputs.
*
* @author Edward Raff
*/
public class TruncatedSVD
{
private Matrix U, V;
/**
     * Stores the diagonal values of the S matrix, i.e., the top-k singular values.
*/
private double[] s;
/**
     * Creates a new SVD of the matrix {@code A} such that A = U Σ V<sup>T</sup>. The matrix
     * {@code A} will be modified and used as temp space when computing the SVD.
     * @param A the matrix to create the SVD of
     * @param k the number of top singular values to compute
*/
public TruncatedSVD(Matrix A, int k)
{
DenseVector invertS = new DenseVector(k);
        if(A.rows() < A.cols())//U will be smaller than V, so let's compute U with the eigendecomposition, and reconstruct V
{
Lanczos u_lanc = new Lanczos(A, k, true, false);
U = u_lanc.getEigenVectors();
s = u_lanc.d;
for(int i = 0; i < k; i++)
{
s[i] = Math.sqrt(Math.max(s[i], 0.0));
if(s[i] == 0)//numerical issue
invertS.set(i, 0.0);
else
invertS.set(i, 1/s[i]);
}
//V = (A^T * u * (diag(1/s)))
V = A.transposeMultiply(U);
Matrix.diagMult(V, invertS);
V = V.transpose();
}
else
{
Lanczos v_lanc = new Lanczos(A, k, false, false);
V = v_lanc.getEigenVectors().transpose();
s = v_lanc.d;
for(int i = 0; i < k; i++)
{
s[i] = Math.sqrt(Math.max(s[i], 0.0));
if(s[i] == 0)//numerical issue
invertS.set(i, 0.0);
else
invertS.set(i, 1/s[i]);
}
            //U = A * (diag(1/s) * v)^T
            //TODO this is inefficient, need to add new function to replace
Matrix tmp = V.clone();
Matrix.diagMult(invertS, tmp);
U = A.multiplyTranspose(tmp);
}
}
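    /*
     * Illustrative usage sketch (not part of the original source; A is assumed
     * to be a Matrix with min(rows, cols) > 5, and is cloned since the
     * constructor may use its argument as temp space):
     *
     *   TruncatedSVD svd = new TruncatedSVD(A.clone(), 5);
     *   double[] topSingularValues = svd.getSingularValues();//length 5, sorted
     *   double conditionEstimate = svd.getCondition();
     */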
private int sLength()
{
return min(U.rows(), V.rows());
}
/**
* Returns the backing matrix U of the SVD. Do not alter this matrix.
* @return the matrix U of the SVD
*/
public Matrix getU()
{
return U;
}
/**
     * Returns the backing matrix V of the SVD. Do not alter this matrix.
* @return the matrix V of the SVD
*/
public Matrix getV()
{
return V;
}
/**
     * Returns a copy of the sorted array of the singular values, including the near zero ones.
* @return a copy of the sorted array of the singular values, including the near zero ones.
*/
public double[] getSingularValues()
{
return Arrays.copyOf(s, sLength());
}
/**
* Returns the diagonal matrix S such that the SVD product results in the original matrix. The diagonal contains the singular values.
* @return a dense diagonal matrix containing the singular values
*/
public Matrix getS()
{
Matrix DS = new DenseMatrix(U.rows(), V.rows());
for(int i = 0; i < sLength(); i++)
DS.set(i, i, s[i]);
return DS;
}
/**
* Returns the 2 norm of the matrix, which is the maximal singular value.
* @return the 2 norm of the matrix
*/
public double getNorm2()
{
return s[0];
}
/**
* Returns the condition number of the matrix. The condition number is a positive measure of the numerical
* instability of the matrix. The larger the value, the less stable the matrix. For singular matrices,
* the result is {@link Double#POSITIVE_INFINITY}.
* @return the condition number of the matrix
*/
public double getCondition()
{
return getNorm2()/s[sLength()-1];
}
private double getDefaultTolerance()
{
return max(U.rows(), V.rows())*(nextUp(getNorm2())-getNorm2());
}
/**
* Returns the numerical rank of the matrix. Near zero values will be ignored.
* @return the rank of the matrix
*/
public int getRank()
{
return getRank(getDefaultTolerance());
}
/**
* Indicates whether or not the input matrix was of full rank, full
* rank matrices are more numerically stable.
*
     * @return <tt>true</tt> if the matrix was of full rank
*/
public boolean isFullRank()
{
return getRank() == sLength();
}
/**
     * Returns the numerical rank of the matrix. Values less than or equal to <tt>tol</tt> will be ignored.
* @param tol the cut of for singular values
* @return the rank of the matrix
*/
public int getRank(double tol)
{
for(int i = 0; i < sLength(); i++)
if(s[i] <= tol)
return i;
return sLength();
}
/**
* Returns an array containing the inverse singular values. Near zero values are converted to zero.
* @return an array containing the inverse singular values
*/
public double[] getInverseSingularValues()
{
return getInverseSingularValues(getDefaultTolerance());
}
/**
     * Returns an array containing the inverse singular values. Values that are less than or equal to <tt>tol</tt> are converted to zero.
* @param tol the cut of for singular values
* @return an array containing the inverse singular values
*/
public double[] getInverseSingularValues(double tol)
{
double[] sInv = Arrays.copyOf(s, sLength());
for(int i = 0; i < sInv.length; i++)
if(sInv[i] > tol)
sInv[i] = 1.0/sInv[i];
else
sInv[i] = 0;
return sInv;
}
}
| 6,501 | 30.110048 | 138 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/Vec.java |
package jsat.linear;
import java.io.Serializable;
import static java.lang.Math.*;
import java.util.Arrays;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Random;
import jsat.math.Function1D;
import jsat.math.IndexFunction;
import jsat.utils.random.RandomUtil;
/**
* Vec is a object representing the math concept of a vector. A vector could be
* either sparse or dense, where sparse vectors have a high number of zero
* values that are not explicitly stored.
* <br><br>
* This abstract class provides a large number of pre-implemented methods. Some
* of which are implemented only for a dense vector, or may not be completely
* efficient for the underlying implementation. Methods that should be
* considered for overloading by an implementation will be indicated in the
* documentation.
*
* @author Edward Raff
*/
public abstract class Vec implements Cloneable, Iterable<IndexValue>, Serializable
{
private static final long serialVersionUID = 9035784536820782955L;
/**
* Returns the length of this vector
* @return the length of this vector
*/
abstract public int length();
/**
*
* @return the number of NaNs present in this vector
*/
public int countNaNs()
{
int nans = 0;
for(IndexValue iv : this)
if(Double.isNaN(iv.getValue()))
nans++;
return nans;
}
/**
* Indicates whether or not this vector can be mutated. If
* {@code false}, any method that contains "mutate" will not work.
* <br><br>
* By default, this returns {@code true}
*
* @return {@code true} if the vector supports being altered, {@code false}
     * otherwise.
*/
public boolean canBeMutated()
{
return true;
}
/**
* Returns a suitable vector that can be altered for some function of the
* form a <i>op</i> b, where {@code a = this}
*
* @param other the other vector. May be {@code null}
* @return the mutable vector
*/
private Vec getThisSide(Vec other)
{
if (this.canBeMutated())
return this.clone();
if (other == null)
if (this.isSparse())
return new SparseVector(this);
else
return new DenseVector(this);
if (this.isSparse() && other.isSparse())
return new SparseVector(this);
else
return new DenseVector(this);
}
/**
* Computes the number of non zero values in this vector
* @return the number of non zero values stored
*/
public int nnz()
{
int nnz = 0;
for(IndexValue i : this)
nnz++;
return nnz;
}
/**
* Gets the value stored at a specific index in the vector
* @param index the index to access
* @return the double value in the vector
* @throws IndexOutOfBoundsException if the index given is greater than or
* equal to its {@link #length() }
*/
abstract public double get(int index);
/**
* Sets the value stored at a specified index in the vector
* @param index the index to access
* @param val the value to store in the index
* @throws IndexOutOfBoundsException if the index given is greater than or
* equal to its {@link #length() }
*/
abstract public void set(int index, double val);
/**
* Increments the value stored at a specified index in the vector
* @param index the index to access
* @param val the value to store in the index
* @throws IndexOutOfBoundsException if the index given is greater than or
* equal to its {@link #length() }
*/
public void increment(int index, double val)
{
set(index, val+get(index));
}
/**
* Returns a new vector that is the result of {@code this + c}
* @param c the constant to add
* @return the result of adding {@code c} to {@code this}
*/
public Vec add(double c)
{
Vec toRet = this.getThisSide(null);
toRet.mutableAdd(c);
return toRet;
}
/**
* Returns a new vector that is the result of {@code this + b}
* @param b the vector to add
* @return the result of {@code b + this}
*/
public Vec add(Vec b)
{
Vec toRet = this.getThisSide(b);
toRet.mutableAdd(b);
return toRet;
}
/**
* Returns a new vector that is the result of {@code this - c}
* @param c the constant to subtract
* @return the result of {@code this - c}
*/
public Vec subtract(double c)
{
return add(-c);
}
/**
* Returns a new vector that is the result of {@code this - b}
* @param b the vector to subtract from {@code this}
* @return the result of {@code this - b}
*/
public Vec subtract(Vec b)
{
Vec toRet = this.getThisSide(b);
toRet.mutableSubtract(b);
return toRet;
}
/**
* Returns a new vector that is the result of multiplying each value in
* {@code this} by its corresponding value in {@code b}
* @param b the vector to pairwise multiply by
* @return the result of the pairwise multiplication of {@code b} onto the
* values of {@code this}
*/
public Vec pairwiseMultiply(Vec b)
{
Vec toRet = this.getThisSide(b);
toRet.mutablePairwiseMultiply(b);
return toRet;
}
/**
* Returns a new vector that is the result of {@code this * c}
* @param c the constant to multiply by
* @return the result of {@code this * c}
*/
public Vec multiply(double c)
{
Vec toRet = this.getThisSide(null);
toRet.mutableMultiply(c);
return toRet;
}
/**
* Returns a new vector that is the result of the vector matrix product
* <tt>this<sup>T</sup>A</tt>
* @param A the matrix to multiply with
* @return the vector matrix product
*/
public Vec multiply(Matrix A)
{
DenseVector b = new DenseVector(A.cols());
this.multiply(A, b);
return b;
}
/**
     * If this is vector <tt>a</tt>, then this computes b = b + <tt>a</tt><sup>T</sup>*<tt>A</tt>
     * @param A the matrix to multiply by
* @param b the vector to mutate by adding the result to
*/
public void multiply(Matrix A, Vec b)
{
multiply(1, A, b);
}
/**
     * If this is vector <tt>a</tt>, then this computes b = b + c <tt>a</tt><sup>T</sup>*<tt>A</tt>
     * @param c the constant factor to multiply by
     * @param A the matrix to multiply by
* @param b the vector to mutate by adding the result to
*/
public void multiply(double c, Matrix A, Vec b)
{
if (this.length() != A.rows())
throw new ArithmeticException("Vector x Matrix dimensions do not agree [1," + this.length() + "] x [" + A.rows() + ", " + A.cols() + "]");
if (b.length() != A.cols())
throw new ArithmeticException("Destination vector is not the right size");
if (!isSparse())
{
for (int i = 0; i < this.length(); i++)
{
double this_i = c * get(i);
for (int j = 0; j < A.cols(); j++)
b.increment(j, this_i * A.get(i, j));
}
}
else
{
for (IndexValue iv : this)
{
final int i = iv.getIndex();
double this_i = c * iv.getValue();
for (int j = 0; j < A.cols(); j++)
b.increment(j, this_i * A.get(i, j));
}
}
}
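    /*
     * Illustrative usage sketch (not part of the original source): accumulate
     * b += 2 a^T A without allocating an intermediate vector.
     *
     *   Vec a = new DenseVector(new double[]{1, 2});
     *   Matrix A = new DenseMatrix(2, 3);
     *   Vec b = new DenseVector(3);
     *   a.multiply(2.0, A, b);//b now holds 2 a^T A
     */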
/**
* Returns a new vector that is the result of dividing each value in
* {@code this} by the value in the same index in {@code b}
* @param b the vector to pairwise divide by
* @return the result of pairwise division of {@code this} by {@code b}
*/
public Vec pairwiseDivide(Vec b)
{
Vec toRet = this.getThisSide(b);
toRet.mutablePairwiseDivide(b);
return toRet;
}
/**
* Returns a new vector that is the result of {@code this / c}
* @param c the constant to divide by
* @return the result of {@code this / c}
*/
public Vec divide(double c)
{
Vec toRet = this.getThisSide(null);
toRet.mutableDivide(c);
return toRet;
}
/**
* Alters this vector such that
* <tt>this</tt> = <tt>this</tt> + <tt>c</tt>
* <br><br>
* This method should be overloaded for a serious implementation.
*
* @param c a scalar constant to add to each value in this vector
*/
public void mutableAdd(double c)
{
for(int i = 0; i < length(); i++)
increment(i, c);
}
/**
* Alters this vector such that
* <tt>this</tt> = <tt>this</tt> + <tt>c</tt> * <tt>b</tt>
* <br><br>
* This method should be overloaded for a serious implementation.
*
* @param c a scalar constant
* @param b the vector to add to this
*/
public void mutableAdd(double c, Vec b)
{
if(length() != b.length())
throw new ArithmeticException("Vectors must have the same length, not " + length() + " and " + b.length());
if(b.isSparse())
for(IndexValue iv : b)
increment(iv.getIndex(), c*iv.getValue());
else
for(int i = 0; i < length(); i++)
increment(i, c*b.get(i));
}
/**
* Alters this vector such that
* <tt>this</tt> = <tt>this</tt> + <tt>b</tt>
* @param b the vector to add to this
* @throws ArithmeticException if the vectors do not have the same length
*/
public void mutableAdd(Vec b)
{
this.mutableAdd(1, b);
}
/**
* Alters this vector such that
* <tt>this</tt> = <tt>this</tt> - <tt>c</tt>
* @param c the scalar constant to subtract from all values in this vector
*/
public void mutableSubtract(double c)
{
mutableAdd(-c);
}
/**
* Alters this vector such that
* <tt>this</tt> = <tt>this</tt> - <tt>c</tt> * <tt>b</tt>
* @param c a scalar constant
* @param b the vector to subtract from this
* @throws ArithmeticException if the vectors do not have the same length
*/
public void mutableSubtract(double c, Vec b)
{
this.mutableAdd(-c, b);
}
/**
* Alters this vector such that
* <tt>this</tt> = <tt>this</tt> - <tt>b</tt>
* @param b the vector to subtract from this
* @throws ArithmeticException if the vectors are not the same length
*/
public void mutableSubtract(Vec b)
{
this.mutableAdd(-1, b);
}
/**
* Mutates {@code this} by multiplying each value by the value in {@code b}
* that has the same index.
* <br><br>
* This method should be overloaded for a serious implementation.
*
* @param b the vector to pairwise multiply by
*/
public void mutablePairwiseMultiply(Vec b)
{
if(length() != b.length())
throw new ArithmeticException("Vector lengths do not agree " + length() + " vs " + b.length());
for(int i = 0; i < length(); i++)
set(i, get(i)*b.get(i));
}
/**
* Mutates {@code this *= c}
* <br><br>
* This method should be overloaded for a serious implementation.
*
* @param c the constant to multiply by
*/
public void mutableMultiply(double c)
{
for(int i = 0; i < length(); i++)
set(i, get(i)*c);
}
/**
* Mutates {@code this} by dividing each value by the value in {@code b}
* that has the same index
* <br><br>
* This method should be overloaded for a serious implementation.
*
* @param b the vector to pairwise divide by
*/
public void mutablePairwiseDivide(Vec b)
{
if(length() != b.length())
throw new ArithmeticException("Vector lengths do not agree " + length() + " vs " + b.length());
for(int i = 0; i < length(); i++)
set(i, get(i)/b.get(i));
}
/**
* Mutates {@code this /= c}
* <br><br>
* This method should be overloaded for a serious implementation.
*
* @param c the constant to divide by
*/
public void mutableDivide(double c)
{
for(int i = 0; i < length(); i++)
set(i, get(i)/c);
}
/**
* Returns a copy of this array with the values moved around so that they are in sorted order
* @return a new array in sorted order
*/
public Vec sortedCopy()
{
double[] arrayCopy = arrayCopy();
Arrays.sort(arrayCopy);
return new DenseVector(arrayCopy);
}
/**
* Returns the minimum value stored in this vector
*
* @return the minimum value in this vector
*/
public double min()
{
if (isSparse() && nnz() < length())
{
double min = 0.0;
for (IndexValue iv : this)
min = Math.min(min, iv.getValue());
return min;
}
else
{
double min = get(0);
for (int i = 1; i < length(); i++)
min = Math.min(min, get(i));
return min;
}
}
/**
* Returns the maximum value stored in this vector
*
* @return the maximum value in this vector
*/
public double max()
{
if (isSparse() && nnz() < length())
{
double max = 0.0;
for (IndexValue iv : this)
max = Math.max(max, iv.getValue());
return max;
}
else
{
double max = get(0);
for (int i = 1; i < length(); i++)
max = Math.max(max, get(i));
return max;
}
}
/**
* Computes the sum of the values in this vector
* @return the sum of this vector's values
*/
public double sum()
{
/*
         * Uses Kahan summation algorithm, which is more accurate than
         * naively summing the values in floating point. Though it
         * does not guarantee the best possible accuracy
*
* See: http://en.wikipedia.org/wiki/Kahan_summation_algorithm
*/
double sum = 0;
double c = 0;
for(IndexValue iv : this)
{
double d = iv.getValue();
double y = d - c;
double t = sum+y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
/**
* Computes the mean value of all values stored in this vector
* @return the mean value
*/
public double mean()
{
return sum()/length();
}
/**
* Computes the standard deviation of the values in this vector
* @return the standard deviation
*/
public double standardDeviation()
{
return Math.sqrt(variance());
}
/**
* Computes the variance of the values in this vector, which is
* {@link #standardDeviation() }<sup>2</sup>
* @return the variance
*/
public double variance()
{
double mu = mean();
double variance = 0;
double N = length();
int used = 0;
for(IndexValue x : this)
{
used++;
variance += Math.pow(x.getValue()-mu, 2)/N;
}
//Now add all the zeros we skipped into it
variance += (length()-used) * Math.pow(0-mu, 2)/N;
return variance;
}
/**
* Returns the median value in this vector
* @return the median
*/
public double median()
{
Vec copy = sortedCopy();
if(copy.length() % 2 != 0)
return copy.get(copy.length()/2);
        else//average the two middle values, which live at indices length/2-1 and length/2
            return copy.get(copy.length()/2-1)/2+copy.get(copy.length()/2)/2;
}
/**
* Computes the skewness of this vector, which is the 3rd moment.
* @return the skewness
*/
public double skewness()
{
double mean = mean();
double tmp = 0;
int length = length();
int used = 0;
for(IndexValue iv : this)
{
tmp += pow(iv.getValue()-mean, 3);
used++;
}
        //All the zeros we skipped
tmp += pow(-mean, 3)*(length-used);
double s1 = tmp / (pow(standardDeviation(), 3) * (length-1) );
if(length >= 3)//We can use the bias corrected formula
return sqrt(length*(length-1))/(length-2)*s1;
return s1;
}
/**
* Computes the kurtosis of this vector, which is the 4th moment.
* @return the kurtosis
*/
public double kurtosis()
{
double mean = mean();
double tmp = 0;
final int length = length();
int used = 0;
for(IndexValue iv : this)
{
tmp += pow(iv.getValue()-mean, 4);
used++;
}
        //All the zeros we skipped
tmp += pow(-mean, 4)*(length-used);
return tmp / (pow(standardDeviation(), 4) * (length-1) ) - 3;
}
/**
     * Indicates whether or not this vector is optimized for sparse computation,
     * meaning that most values in the vector are zero - and considered
     * implicit. Only non-zero values are stored.
     * @return <tt>true</tt> if the vector is sparse, <tt>false</tt> otherwise.
*/
abstract public boolean isSparse();
/**
* Copies the values of this Vector into another vector
* @param destination the vector to store the values in.
* @throws ArithmeticException if the vectors are not of the same length
*/
public void copyTo(Vec destination)
{
if(this.length() != destination.length())
throw new ArithmeticException("Source and destination must be the same size");
if (this.isSparse())
{
destination.zeroOut();
for (IndexValue iv : this)
destination.set(iv.getIndex(), iv.getValue());
}
else
{
for (int i = 0; i < length(); i++)
destination.set(i, this.get(i));
}
}
/**
* Copies the values of this vector into a row of another Matrix
* @param A the matrix to store the contents of this vector in
* @param row the row of the matrix to store the values to
* @throws ArithmeticException if the columns of the matrix is not the same as the length of this vector.
*/
public void copyToRow(Matrix A, int row)
{
if(this.length() != A.cols())
throw new ArithmeticException("Destination matrix does not have the same number of columns as this has rows");
for(int i = 0; i < length(); i++)
A.set(row, i, get(i));
}
/**
* Copies the values of this vector into a column of another Matrix.
* @param A the matrix to store the contents of this vector in
* @param col the column of the matrix to store the values to
*/
public void copyToCol(Matrix A, int col)
{
if(this.length() != A.rows())
throw new ArithmeticException("Destination matrix does not have the same number of rows as this has rows");
for(int i = 0; i < length(); i++)
A.set(i, col, get(i));
}
@Override
abstract public Vec clone();
/**
* Returns a new vector that is the result of normalizing this vector by the
* L<sub>2</sub> norm
* @return a normalized version of this vector
*/
public Vec normalized()
{
Vec toRet = this.getThisSide(null);
toRet.normalize();
return toRet;
}
/**
* Mutates this vector to be normalized by the L<sub>2</sub> norm
*/
public void normalize()
{
mutableDivide(Math.max(pNorm(2.0), 1e-10));
}
/**
* Applies the given function to each and every value in the vector.
* <br><br>
* This method should be overloaded for a serious implementation.
*
* @param f the single variable function to apply
*/
public void applyFunction(Function1D f)
{
for(int i = 0; i < length(); i++)
set(i, f.f(get(i)));
}
/**
* Applies the given function to each and every value in the vector.
* The function takes 2 arguments, an arbitrary value, and then an
* index. The index passed to the function is the index in the array
* that the value came from.
* <br><br>
     * <b><i>NOTE:</i></b> Because negative values are invalid indexes,
     * the given function should return 0.0 when given a negative index
     * if, and only if, f(0, index) = 0 for every valid index. If f(0, index)
     * != 0 for even one value of index, it should return a non zero
     * value when given a negative index.
* <br><br>
* IE: f(value_i, i) = x
* <br><br>
* This method should be overloaded for a serious implementation.
*
* @param f the 2 dimensional index function to apply
*/
public void applyIndexFunction(IndexFunction f)
{
for(int i = 0; i < length(); i++)
set(i, f.indexFunc(get(i), i));
}
/**
* Returns the p-norm distance between this and another vector y.
* @param p the distance type. 2 is the common value
* @param y the other vector to compare against
* @return the p-norm distance
*/
public double pNormDist(double p, Vec y)
{
Iterator<IndexValue> thisIter = this.iterator();
Iterator<IndexValue> otherIter = y.iterator();
if (!thisIter.hasNext())
if (!otherIter.hasNext())
return 0;
else
return y.pNorm(p);
else if (!otherIter.hasNext())
return this.pNorm(p);
double result = 0;
IndexValue av = thisIter.next();
IndexValue bv = otherIter.next();
do
{
boolean nextA = false, nextB = false;
if (av.getIndex() == bv.getIndex())
{
result += pow(abs(av.getValue() - bv.getValue()), p);
nextA = nextB = true;
}
else if(av.getIndex() < bv.getIndex())
{
result += pow(abs(av.getValue()), p);
nextA = true;
}
else if(av.getIndex() > bv.getIndex())
{
result += pow(abs(bv.getValue()), p);
nextB = true;
}
if(nextA)
av = thisIter.hasNext() ? thisIter.next() : null;
if(nextB)
bv = otherIter.hasNext() ? otherIter.next() : null;
}
while (av != null && bv != null);
//accumulate left overs
while(av != null)
{
result += pow(abs(av.getValue()), p);
av = thisIter.hasNext() ? thisIter.next() : null;
}
while(bv != null)
{
result += pow(abs(bv.getValue()), p);
bv = otherIter.hasNext() ? otherIter.next() : null;
}
return pow(result, 1/p);
}
/**
* Returns the p-norm of this vector.
* @param p the norm type. 2 is a common value
* @return the p-norm of this vector
*/
public double pNorm(double p)
{
if (p <= 0)
throw new IllegalArgumentException("norm must be a positive value, not " + p);
double result = 0;
if (p == 1)
{
for (IndexValue iv : this)
result += abs(iv.getValue());
}
else if (p == 2)
{
for (IndexValue iv : this)
result += iv.getValue() * iv.getValue();
result = Math.sqrt(result);
}
else if (Double.isInfinite(p))
{
for (IndexValue iv : this)
result = Math.max(result, abs(iv.getValue()));
}
else
{
for (IndexValue iv : this)
result += pow(abs(iv.getValue()), p);
result = pow(result, 1 / p);
}
return result;
}
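    /*
     * Illustrative usage sketch (not part of the original source):
     *
     *   Vec v = new DenseVector(new double[]{3, 4});
     *   v.pNorm(1);//7.0, the Manhattan norm
     *   v.pNorm(2);//5.0, the Euclidean norm
     *   v.pNorm(Double.POSITIVE_INFINITY);//4.0, the max norm
     */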
/**
* Computes the dot product between two vectors, which is equivalent to<br>
* <big>Σ</big> this<sub>i</sub>*v<sub>i</sub>
* <br><br>
* This method should be overloaded for a serious implementation.
*
* @param v the other vector
* @return the dot product of this vector and another
*/
public double dot(Vec v)
{
double dot = 0;
if(!this.isSparse() && v.isSparse())
for(IndexValue iv : v)
dot += get(iv.getIndex())*iv.getValue();
else if(this.isSparse() && !v.isSparse())
for(IndexValue iv : this)
dot += iv.getValue()*v.get(iv.getIndex());
else if(this.isSparse() && v.isSparse())
{
Iterator<IndexValue> aIter = this.getNonZeroIterator();
Iterator<IndexValue> bIter = v.getNonZeroIterator();
if(this.nnz() == 0 || v.nnz() == 0)
                return 0;//All zeros? dot is zero
//each must have at least one
IndexValue aCur = aIter.next();
IndexValue bCur = bIter.next();
while(aCur != null && bCur != null)//set to null when have none left
{
if(aCur.getIndex() == bCur.getIndex())
{
dot += aCur.getValue()*bCur.getValue();
if(aIter.hasNext())
aCur = aIter.next();
else
aCur = null;
if(bIter.hasNext())
bCur = bIter.next();
else
bCur = null;
}
else if(aCur.getIndex() < bCur.getIndex())
{
                    //Move a over to try and get the indices equal
if(aIter.hasNext())
aCur = aIter.next();
else
aCur = null;
}
else//b is too small, move it over and try to get them lined up
{
if(bIter.hasNext())
bCur = bIter.next();
else
bCur = null;
}
}
}
else
for(int i = 0; i < length(); i++)
dot += get(i)*v.get(i);
return dot;
}
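    /*
     * Illustrative usage sketch (not part of the original source): the
     * sparse-sparse case above only visits indices stored by both vectors.
     *
     *   Vec x = new SparseVector(new DenseVector(new double[]{0, 2, 0, 4}));
     *   Vec y = new SparseVector(new DenseVector(new double[]{1, 2, 3, 0}));
     *   x.dot(y);//4.0, since index 1 is the only shared non-zero (2 * 2)
     */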
@Override
public String toString()
{
StringBuilder sb = new StringBuilder("[");
sb.append(get(0));
for(int i = 1; i < length(); i++)
sb.append(",").append(get(i));
sb.append("]");
return sb.toString();
}
@Override
public boolean equals(Object obj)
{
return equals(obj, 0.0);
}
public boolean equals(Object obj, double range)
{
if(!(obj instanceof Vec))
return false;
Vec other = (Vec) obj;
range = abs(range);
Iterator<IndexValue> thisIter = this.iterator();
Iterator<IndexValue> otherIter = other.iterator();
if (!thisIter.hasNext())
if (!otherIter.hasNext())
return true;
else
return false;
else if (!otherIter.hasNext())
return false;
IndexValue av = thisIter.next();
IndexValue bv = otherIter.next();
do
{
boolean nextA = false, nextB = false;
            if (av.getIndex() == bv.getIndex())
            {
                //comparisons against NaN are always false, so negate a <= test to catch NaNs as well
                if(!(abs(av.getValue() - bv.getValue()) <= range))
                    if (!(Double.isNaN(av.getValue()) && Double.isNaN(bv.getValue())))//NaN != NaN is always true, so treat two NaNs as a match
                        return false;
                nextA = nextB = true;
            }
else if(av.getIndex() < bv.getIndex())
{
if(abs(av.getValue()) > range)
return false;
nextA = true;
}
else if(av.getIndex() > bv.getIndex())
{
if(abs(bv.getValue()) > range)
return false;
nextB = true;
}
if(nextA)
av = thisIter.hasNext() ? thisIter.next() : null;
if(nextB)
bv = otherIter.hasNext() ? otherIter.next() : null;
}
while (av != null && bv != null);
while(av != null)
{
if(abs(av.getValue()) > range)
return false;
av = thisIter.hasNext() ? thisIter.next() : null;
}
while(bv != null)
{
if(abs(bv.getValue()) > range)
return false;
bv = otherIter.hasNext() ? otherIter.next() : null;
}
return true;
}
/**
* Creates a new array that contains all the values of this vector in the
* appropriate indices
* @return a new array that is a copy of this vector
*/
public double[] arrayCopy()
{
double[] array = new double[length()];
for(IndexValue iv : this)
array[iv.getIndex()] = iv.getValue();
return array;
}
@Override
public Iterator<IndexValue> iterator()
{
return getNonZeroIterator(0);
}
/**
* Returns an iterator that will go over the non zero values in the given
* vector. The iterator does not support the {@link Iterator#remove() }
* method.
*
* @return an iterator for the non zero index value pairs.
*/
public Iterator<IndexValue> getNonZeroIterator()
{
return getNonZeroIterator(0);
}
/**
* Returns an iterator that will go over the non zero values starting from
* the specified index in the given vector. The iterator does not support
* the {@link Iterator#remove() } method.
* <br><br>
* This method should be overloaded for a serious implementation.
*
* @param start the first index (inclusive) to start returning non-zero
* values from
* @return an iterator for the non zero index value pairs
*/
public Iterator<IndexValue> getNonZeroIterator(int start)
{
//Need a little class magic
final Vec magic = this;
int i;
for(i = start; i < magic.length(); i++)
if(magic.get(i) != 0.0)
break;
final int fnz = (magic.length() == 0 || magic.length() <= i || magic.get(i) == 0.0 ) ? -1 : i;
Iterator<IndexValue> itor = new Iterator<IndexValue>()
{
int nextNonZero = fnz;
IndexValue indexValue = new IndexValue(-1, Double.NaN);
@Override
public boolean hasNext()
{
return nextNonZero >= 0;
}
@Override
public IndexValue next()
{
                if(nextNonZero == -1)
                    throw new NoSuchElementException();//per the Iterator contract when no elements remain
indexValue.setIndex(nextNonZero);
indexValue.setValue(get(nextNonZero));
int i = nextNonZero+1;
nextNonZero = -1;
for(; i < magic.length(); i++ )
if(get(i) != 0.0)
{
nextNonZero = i;
break;
}
return indexValue;
}
@Override
public void remove()
{
throw new UnsupportedOperationException("Not supported yet.");
}
};
return itor;
}
/**
* Zeroes out all values in this vector
* <br><br>
* This method should be overloaded for a serious implementation.
*/
public void zeroOut()
{
for(int i = 0; i < length(); i++)
set(i, 0.0);
}
/**
* Provides a hashcode for Vectors. All vector implementations should return the
* same result for cases when {@link #equals(java.lang.Object) } returns true.
* Below is the code used for this class<br>
* <p><code>
* int result = 1;<br>
* <br>
* for (int i = 0; i < length(); i++) <br>
* {<br>
* double val = get(i);<br>
* if(val != 0)<br>
* {<br>
* long bits = Double.doubleToLongBits(val);<br>
* result = 31 * result + (int)(bits ^ (bits >>> 32));<br>
* result = 31 * result + i;<br>
* }<br>
* }<br>
* <br>
* return 31* result + length();<br>
* </code></p>
* @return the hash code for a vector
*/
@Override
public int hashCode()
{
int result = 1;
for (int i = 0; i < length(); i++)
{
double val = get(i);
if(val != 0)
{
long bits = Double.doubleToLongBits(val);
result = 31 * result + (int)(bits ^ (bits >>> 32));
result = 31 * result + i;
}
}
return 31* result + length();
}
/**
     * Creates a dense vector full of random values in the range [0, 1)
* @param length the length of the random vector to create
* @return a random vector of the specified length
*/
public static Vec random(int length)
{
return random(length, RandomUtil.getRandom());
}
/**
     * Creates a dense vector full of random values in the range [0, 1)
* @param length the length of the random vector to create
* @param rand the source of randomness
* @return a random vector of the specified length
*/
public static Vec random(int length, Random rand)
{
Vec v = new DenseVector(length);
for(int i = 0; i < length; i++)
v.set(i, rand.nextDouble());
return v;
}
/**
* Creates a dense vector full of zeros.
* @param length the length of the vector to create
* @return a vector of zeros
*/
public static Vec zeros(int length)
{
return new DenseVector(length);
}
/**
* Changes the length of the given vector. The length can always be
* extended. The length can be reduced down to the size of the largest non
* zero element.<br>
* <br>
* NOTE: this function is not mandatory. Vectors that are views or implicit
* transformations of another vector are unlikely to support this
* functionality. Vectors that are base level representations should support
* this function.
*
* @param length the new length for this vector
*/
abstract public void setLength(int length);
}
| 35,449 | 28.99154 | 150 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/VecOps.java |
package jsat.linear;
import java.util.Iterator;
import jsat.math.Function;
import jsat.math.Function1D;
/**
* This class provides efficient implementations of use full vector
* operations and updates. The implementations are efficient for
* both dense and sparse vectors.
*
* @author Edward Raff
*/
public class VecOps
{
/**
* Bad value to be given as a default so that the index returned is always invalid.
* Thus, we can avoid null checks and just check on the index - avoiding additional code.
*/
private static final IndexValue badIV = new IndexValue(-1, Double.NaN);
/**
* Computes the result of <big>∑</big><sub>∀ i ∈ |w|</sub> w<sub>i</sub>
* f(x<sub>i</sub>-y<sub>i</sub>)
* @param w the vector of weight values to multiply on the results
* @param x the first vector of values in the difference
* @param y the second vector of values in the difference
* @param f the single variate function to apply to the difference computed
* @return the accumulated sum of the evaluations
*/
public static double accumulateSum(final Vec w, final Vec x, final Vec y, final Function1D f)
{
if(w.length() != x.length() || x.length() != y.length())
throw new ArithmeticException("All 3 vector inputs must have equal lengths");
double val = 0;
final boolean skipZeros = f.f(0) == 0;
final boolean wSparse = w.isSparse();
final boolean xSparse = x.isSparse();
final boolean ySparse = y.isSparse();
//skip zeros applied to (x_i-y_i) == 0. We can always skip zeros in w
if (wSparse && !xSparse && !ySparse)
{
for (IndexValue wiv : w)
{
final int idx = wiv.getIndex();
val += wiv.getValue() * f.f(x.get(idx) - y.get(idx));
}
}
else if (!wSparse && !xSparse && !ySparse)//w is dense
{
for (int i = 0; i < w.length(); i++)
val += w.get(i) * f.f(x.get(i) - y.get(i));
}
else //Best for all sparse, but also works well in general
{
Iterator<IndexValue> xIter = x.iterator();
Iterator<IndexValue> yIter = y.iterator();
IndexValue xiv = xIter.hasNext() ? xIter.next() : badIV;
IndexValue yiv = yIter.hasNext() ? yIter.next() : badIV;
for (IndexValue wiv : w)
{
int index = wiv.getIndex();
double w_i = wiv.getValue();
while (xiv.getIndex() < index && xIter.hasNext())
xiv = xIter.next();
while (yiv.getIndex() < index && yIter.hasNext())
yiv = yIter.next();
final double x_i, y_i;
if (xiv.getIndex() == index)
x_i = xiv.getValue();
else
x_i = 0;
if (yiv.getIndex() == index)
y_i = yiv.getValue();
else
y_i = 0;
if (skipZeros && x_i == 0 && y_i == 0)
continue;
val += w_i * f.f(x_i - y_i);
}
}
return val;
}
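    /*
     * Illustrative usage sketch (not part of the original source; assumes
     * Function1D is usable as a lambda):
     *
     *   Vec w = new DenseVector(new double[]{1, 2, 3});
     *   Vec x = new DenseVector(new double[]{1, 1, 1});
     *   Vec y = new DenseVector(new double[]{0, 1, 2});
     *   VecOps.accumulateSum(w, x, y, d -> d * d);//1*1 + 2*0 + 3*1 = 4.0
     */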
/**
* Computes the weighted dot product of <big>∑</big><sub>∀ i ∈ |w|</sub> w_i x_i y_i
* @param w the vector containing the weights, it is assumed to be random access
* @param x the first vector of the dot product
* @param y the second vector of the dot product
* @return the weighted dot product, which is equivalent to the sum of the products of each index for each vector
*/
public static double weightedDot(final Vec w, final Vec x, final Vec y)
{
if(w.length() != x.length() || x.length() != y.length())
throw new ArithmeticException("All 3 vector inputs must have equal lengths");
double sum = 0;
if(x.isSparse() && y.isSparse())
{
Iterator<IndexValue> xIter = x.iterator();
Iterator<IndexValue> yIter = y.iterator();
IndexValue xiv = xIter.hasNext() ? xIter.next() : badIV;
IndexValue yiv = yIter.hasNext() ? yIter.next() : badIV;
while(xiv != badIV && yiv != badIV)
{
if(xiv.getIndex() < yiv.getIndex())
xiv = xIter.hasNext() ? xIter.next() : badIV;
else if(xiv.getIndex() > yiv.getIndex())
yiv = yIter.hasNext() ? yIter.next() : badIV;
else//on the same page
{
sum += w.get(xiv.getIndex())*xiv.getValue()*yiv.getValue();
xiv = xIter.hasNext() ? xIter.next() : badIV;
yiv = yIter.hasNext() ? yIter.next() : badIV;
}
}
}
else if(x.isSparse())
{
for(IndexValue iv : x)
{
int indx = iv.getIndex();
sum += w.get(indx)*iv.getValue()*y.get(indx);
}
}
else if(y.isSparse())
return weightedDot(w, y, x);
else//all dense
{
for(int i = 0; i < w.length(); i++)
sum += w.get(i)*x.get(i)*y.get(i);
}
return sum;
}
}
| 5,382 | 34.649007 | 117 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/VecPaired.java |
package jsat.linear;
import java.util.Comparator;
import java.util.Iterator;
/**
 * This data structure allows a Vector to be wrapped so that it is
 * associated with some object. Note that operations
 * that return a vector will not return a Paired Vector, as there
* is no reason to associate a different vector with this
* vector's pair.
*
* @author Edward Raff
*/
public class VecPaired<V extends Vec, P> extends Vec
{
private static final long serialVersionUID = 8039272826439917423L;
private V vector;
private P pair;
public VecPaired(V v, P p)
{
this.vector = v;
this.pair = p;
}
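    /*
     * Illustrative usage sketch (not part of the original source): carry a
     * label (or any payload) along with a vector through vector math.
     *
     *   VecPaired<DenseVector, Integer> p =
     *           new VecPaired<>(new DenseVector(new double[]{1, 2}), 7);
     *   p.getPair();//7
     *   p.add(1.0);//returns a plain Vec, the pairing is not propagated
     */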
public P getPair()
{
return pair;
}
public void setPair(P pair)
{
this.pair = pair;
}
public V getVector()
{
return vector;
}
public void setVector(V vector)
{
this.vector = vector;
}
@Override
public int length()
{
return vector.length();
}
@Override
public int nnz()
{
return vector.nnz();
}
@Override
public double get(int index)
{
return vector.get(index);
}
@Override
public void set(int index, double val)
{
vector.set(index, val);
}
@Override
public Vec add(double c)
{
return vector.add(c);
}
@Override
public Vec add(Vec b)
{
b = extractTrueVec(b);
return vector.add(b);
}
@Override
public Vec subtract(Vec b)
{
b = extractTrueVec(b);
return vector.subtract(b);
}
@Override
public Vec pairwiseMultiply(Vec b)
{
b = extractTrueVec(b);
return vector.pairwiseMultiply(b);
}
@Override
public Vec multiply(double c)
{
return vector.multiply(c);
}
@Override
public void multiply(double c, Matrix A, Vec b)
{
vector.multiply(c, A, b);
}
@Override
public Vec pairwiseDivide(Vec b)
{
b = extractTrueVec(b);
return vector.pairwiseDivide(b);
}
@Override
public Vec divide(double c)
{
return vector.divide(c);
}
@Override
public void mutableAdd(double c)
{
vector.mutableAdd(c);
}
@Override
public void mutableAdd(Vec b)
{
b = extractTrueVec(b);
vector.mutableAdd(b);
}
@Override
public void mutableSubtract(Vec b)
{
b = extractTrueVec(b);
vector.mutableSubtract(b);
}
@Override
public void mutablePairwiseMultiply(Vec b)
{
b = extractTrueVec(b);
        vector.mutablePairwiseMultiply(b);
}
@Override
public void mutableMultiply(double c)
{
vector.mutableMultiply(c);
}
@Override
public void mutablePairwiseDivide(Vec b)
{
b = extractTrueVec(b);
vector.mutablePairwiseDivide(b);
}
@Override
public void mutableDivide(double c)
{
vector.mutableDivide(c);
}
@Override
public Vec sortedCopy()
{
return vector.sortedCopy();
}
@Override
public double min()
{
return vector.min();
}
@Override
public double max()
{
return vector.max();
}
@Override
public double sum()
{
return vector.sum();
}
@Override
public double mean()
{
return vector.mean();
}
@Override
public double standardDeviation()
{
return vector.standardDeviation();
}
@Override
public double variance()
{
return vector.variance();
}
@Override
public double median()
{
return vector.median();
}
@Override
public double skewness()
{
return vector.skewness();
}
@Override
public double kurtosis()
{
return vector.kurtosis();
}
@Override
public Vec clone()
{
return new VecPaired(vector.clone(), pair);
}
@Override
public Vec normalized()
{
return vector.normalized();
}
@Override
public void normalize()
{
vector.normalize();
}
@Override
public double pNormDist(double p, Vec y)
{
y = extractTrueVec(y);
return vector.pNormDist(p, y);
}
@Override
public double pNorm(double p)
{
return vector.pNorm(p);
}
@Override
public double dot(Vec v)
{
v = extractTrueVec(v);
return this.vector.dot(v);
}
@Override
public String toString()
{
return vector.toString();
}
@Override
public boolean equals(Object obj)
{
return vector.equals(obj);
}
@Override
public boolean equals(Object obj, double range)
{
return vector.equals(obj, range);
}
@Override
public double[] arrayCopy()
{
return vector.arrayCopy();
}
@Override
public void mutableAdd(double c, Vec b)
{
b = extractTrueVec(b);
this.vector.mutableAdd(c, b);
}
@Override
public Iterator<IndexValue> getNonZeroIterator(int start)
{
return extractTrueVec(vector).getNonZeroIterator(start);
}
/**
* This method is used assuming multiple VecPaired are used together. The
* implementation of the vector may have logic to handle the case that
* the other vector is of the same type. This will go through every layer
* of VecPaired to return the final base vector.
*
* @param b a Vec, that may or may not be an instance of {@link VecPaired}
* @return the final Vec backing b, which may be b itself.
*/
public static Vec extractTrueVec(Vec b)
{
while(b instanceof VecPaired)
b = ((VecPaired) b).getVector();
return b;
}
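    /*
     * Illustrative usage sketch (not part of the original source): unwraps
     * arbitrarily nested pairings down to the backing vector.
     *
     *   DenseVector dv = new DenseVector(new double[]{1, 2});
     *   Vec nested = new VecPaired<>(new VecPaired<>(dv, "inner"), "outer");
     *   VecPaired.extractTrueVec(nested) == dv;//true
     */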
public static <V extends Vec, P extends Comparable<P>> Comparator<VecPaired<V, P>> vecPairedComparator()
{
Comparator<VecPaired<V, P>> comp = new Comparator<VecPaired<V, P>>() {
@Override
public int compare(VecPaired<V, P> o1, VecPaired<V, P> o2)
{
return o1.getPair().compareTo(o2.getPair());
}
};
return comp;
};
@Override
public int hashCode()
{
return vector.hashCode();
}
@Override
public boolean isSparse()
{
return vector.isSparse();
}
@Override
public void setLength(int length)
{
vector.setLength(length);
}
}
| 6,558 | 17.476056 | 109 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/VecPairedComparable.java | package jsat.linear;
/**
* Utility class for using {@link VecPaired} when the paired value is comparable
* , and the vectors need to be sorted based on their paired value. This class
* performs exactly the same, and its only modification is that it is comparable
* based on the paired object type.
*
* @author Edward Raff
*/
public class VecPairedComparable<V extends Vec, P extends Comparable<P>> extends VecPaired<V, P> implements Comparable<VecPairedComparable<V, P>>
{
private static final long serialVersionUID = -7061543870162459467L;
public VecPairedComparable(V v, P p)
{
super(v, p);
}
@Override
public int compareTo(VecPairedComparable<V, P> o)
{
return this.getPair().compareTo(o.getPair());
}
}
| 764 | 26.321429 | 145 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/VecWithNorm.java | package jsat.linear;
import java.util.Iterator;
/**
* A wrapper for a vector that allows for transparent tracking of the 2-norm of
* the base vector. This class is meant primarily for use when most updates are
* done by sparse vectors accumulated to a single dense vector. If there are
* only O(s) non zero values, updating the norm can be done in O(s) time. If
* most updates will be done by dense vectors, this wrapper may not give any
* performance improvements. <br>
* The norm is obtained by calling {@link #pNorm(double) }. The original vector
* can be obtained by calling {@link #getBase() }. The exact values returned for
* the norm may differ slightly due to numerical issues.
*
* @author Edward Raff
*/
public class VecWithNorm extends Vec
{
private static final long serialVersionUID = 3888178071694466561L;
final private Vec base;
private double normSqrd;
/**
* Creates a wrapper around the base vector that will update the norm of the
* vector
* @param base the vector to use as the base value
* @param norm the initial value of the norm
*/
public VecWithNorm(Vec base, double norm)
{
this.base = base;
this.normSqrd = norm*norm;
}
/**
* Creates a wrapper around the base vector that will update the norm of the
* vector
* @param base the vector to use as the base value
*/
public VecWithNorm(Vec base)
{
this(base, base.pNorm(2));
}
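    /*
     * Illustrative usage sketch (not part of the original source):
     *
     *   VecWithNorm v = new VecWithNorm(new DenseVector(new double[]{3, 4}));
     *   v.pNorm(2);//5.0, read from the tracked value
     *   v.set(0, 0.0);//the squared norm is adjusted in O(1)
     *   v.pNorm(2);//4.0
     */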
/**
* Return the base vector that is having its norm tracked
* @return the base vector that is having its norm tracked
*/
public Vec getBase()
{
return base;
}
@Override
public double pNorm(double p)
{
if(p == 2)
return Math.sqrt(normSqrd);
return base.pNorm(p);
}
@Override
public int length()
{
return base.length();
}
@Override
public double get(int index)
{
return base.get(index);
}
@Override
public void set(int index, double val)
{
double old = base.get(index);
normSqrd += -(old*old)+(val*val);
base.set(index, val);
}
@Override
public boolean isSparse()
{
return base.isSparse();
}
@Override
public VecWithNorm clone()
{
return new VecWithNorm(this.base.clone(), Math.sqrt(normSqrd));
}
@Override
public void mutableAdd(double c)
{
//TODO this can be improved for scenarios where the base vector is sparse, but that should be uncommon
for(int i = 0; i < base.length(); i++)
{
double old = base.get(i);
double toAdd = c;
normSqrd += toAdd*(toAdd+2*old);
}
base.mutableAdd(c);
}
@Override
public void mutableAdd(double c, Vec b)
{
for(IndexValue iv : b)
{
double old = base.get(iv.getIndex());
double toAdd = c*iv.getValue();
normSqrd += toAdd*(toAdd+2*old);
}
base.mutableAdd(c, b);
}
@Override
public void mutablePairwiseMultiply(Vec b)
{
        //if b is sparse or dense it's going to need updates to every value.
        //might as well just refresh
base.mutablePairwiseMultiply(b);
normSqrd = Math.pow(base.pNorm(2), 2);
}
@Override
public void mutableMultiply(double c)
{
normSqrd *= c*c;
base.mutableMultiply(c);
}
@Override
public void mutablePairwiseDivide(Vec b)
{
        //if b is sparse or dense it's going to need updates to every value.
        //might as well just refresh
base.mutablePairwiseDivide(b);
normSqrd = Math.pow(base.pNorm(2), 2);
}
@Override
    public void mutableDivide(double c)
    {
        normSqrd /= c*c;
        base.mutableDivide(c);//must also update the base vector, not just the tracked norm
    }
@Override
public void zeroOut()
{
normSqrd = 0;
base.zeroOut();
}
@Override
public int nnz()
{
return base.nnz();
}
@Override
public Iterator<IndexValue> getNonZeroIterator()
{
return base.getNonZeroIterator();
}
@Override
public Iterator<IndexValue> getNonZeroIterator(int start)
{
return base.getNonZeroIterator(start);
}
@Override
public void setLength(int length)
{
//b/c addition or removal of zeros does not impact norm, this is easy
base.setLength(length);
}
}
| 4,472 | 23.178378 | 111 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/ChebyshevDistance.java |
package jsat.linear.distancemetrics;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.linear.Vec;
/**
* Chebyshev Distance is the L<sub>∞</sub> norm.
*
* @author Edward Raff
*/
public class ChebyshevDistance implements DistanceMetric
{
private static final long serialVersionUID = 2528153647402824790L;
@Override
public double dist(Vec a, Vec b)
{
if(a.length() != b.length())
throw new ArithmeticException("Vectors must have the same length");
double max = 0;
for(int i = 0; i < a.length(); i++)
max = Math.max(max, Math.abs(a.get(i)-b.get(i)));
return max;
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String toString()
{
return "Chebyshev Distance";
}
@Override
public ChebyshevDistance clone()
{
return new ChebyshevDistance();
}
}
| 1,259 | 17.529412 | 79 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/CosineDistance.java |
package jsat.linear.distancemetrics;
import java.util.List;
import jsat.linear.Vec;
import jsat.utils.DoubleList;
import jsat.utils.concurrent.ParallelUtils;
/**
 * The Cosine Distance is an adaptation of the Cosine Similarity's range from
 * [-1, 1] into the range [0, 1], where 0 means two vectors are the same and 1
 * means they are completely different.
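 * <br><br>
 * A minimal usage sketch (assuming {@link jsat.linear.DenseVector} inputs):
 * <pre>{@code
 * DistanceMetric dm = new CosineDistance();
 * Vec a = new DenseVector(new double[]{1, 0});
 * Vec b = new DenseVector(new double[]{0, 1});
 * double d = dm.dist(a, b); //sqrt(0.5*(1 - 0)) = 0.707... for orthogonal vectors
 * }</pre>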
*
* @author Edward Raff
*/
public class CosineDistance implements DistanceMetric
{
/*
     * NOTE: Math.min(val, 1) is used because numerical instability can cause
     * values slightly larger than 1 when the vectors are extremely close to
     * each other. In this case, it would cause a negative value in the sqrt of
     * the cosineToDistance calculation, resulting in a NaN. So the min is used
     * to avoid this.
*/
private static final long serialVersionUID = -6475546704095989078L;
@Override
public double dist(Vec a, Vec b)
{
if(a.length() != b.length())
throw new ArithmeticException("vectors a and b are of differeing legnths " + a.length() + " and " + b.length());
/*
* a dot b / (2Norm(a) * 2Norm(b)) will return a value in the range -1 to 1
         * -1 means they are completely opposite
*/
double denom = a.pNorm(2) * b.pNorm(2);
if(denom == 0)
return cosineToDistance(-1);
return cosineToDistance(Math.min(a.dot(b) / denom, 1));
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return 1;
}
@Override
public String toString()
{
return "Cosine Distance";
}
@Override
public CosineDistance clone()
{
return new CosineDistance();
}
@Override
public boolean supportsAcceleration()
{
return true;
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> vecs, boolean parallel)
{
//Store the pnorms in the cache
double[] cache = new double[vecs.size()];
ParallelUtils.run(parallel, vecs.size(), (start, end) ->
{
for(int i = start; i < end; i++)
cache[i] = vecs.get(i).pNorm(2);
});
return DoubleList.view(cache, vecs.size());
}
@Override
public double dist(int a, int b, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), vecs.get(b));
double denom = cache.get(a)*cache.get(b);
if(denom == 0)
return cosineToDistance(-1);
return cosineToDistance(Math.min(vecs.get(a).dot(vecs.get(b)) / denom, 1));
}
@Override
public double dist(int a, Vec b, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), b);
double denom = cache.get(a)*b.pNorm(2);
if(denom == 0)
return cosineToDistance(-1);
return cosineToDistance(Math.min(vecs.get(a).dot(b) / denom, 1));
}
@Override
public List<Double> getQueryInfo(Vec q)
{
DoubleList qi = new DoubleList(1);
qi.add(q.pNorm(2));
return qi;
}
@Override
public double dist(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), b);
double denom = cache.get(a)*qi.get(0);
if(denom == 0)
return cosineToDistance(-1);
return cosineToDistance(Math.min(vecs.get(a).dot(b) / denom, 1));
}
/**
* This method converts the cosine distance in [-1, 1] to a valid distance
* metric in the range [0, 1]
* @param cosAngle the cosine similarity in [-1, 1]
* @return the distance metric for the cosine value
*/
public static double cosineToDistance(double cosAngle)
{
return Math.sqrt(0.5*(1-cosAngle));
}
/**
* This method converts the distance obtained with
* {@link #cosineToDistance(double) } back into the cosine angle
* @param dist the distance value in [0, 1]
* @return the cosine angle
*/
public static double distanceToCosine(double dist)
{
return 1-2*(dist*dist);
}
}
| 4,700 | 26.491228 | 124 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/CosineDistanceNormalized.java | package jsat.linear.distancemetrics;
import jsat.linear.Vec;
/**
* This distance metric returns the same cosine distance as
* {@link CosineDistance}. This implementation assumes that all vectors being
* passed in for distance computations have already been L2 normalized. This
* means the distance computation can be done more efficiently, but the results
* will be incorrect if the inputs have not already been normalized. <br>
* The word Normalized is postfixed to the name to avoid confusion, as many
* might assume "Normalized-CosineDistance" would mean a cosine distance with
* some form of additional normalization.
*
* @author Edward Raff
*/
public class CosineDistanceNormalized implements DistanceMetric
{
/*
     * NOTE: Math.min(val, 1) is used because numerical instability can cause
     * values slightly larger than 1 when the vectors are extremely close to
     * each other. In this case, it would cause a negative value in the sqrt of
     * the cosineToDistance calculation, resulting in a NaN. So the min is used
     * to avoid this.
*/
private static final long serialVersionUID = -4041803247001806577L;
@Override
public double dist(Vec a, Vec b)
{
return CosineDistance.cosineToDistance(Math.min(a.dot(b), 1));
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return 1;
}
@Override
public String toString()
{
return "Cosine Distance (Normalized)";
}
@Override
public CosineDistanceNormalized clone()
{
return new CosineDistanceNormalized();
}
}
| 1,917 | 24.918919 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/DenseSparseMetric.java | package jsat.linear.distancemetrics;
import jsat.linear.Vec;
/**
* Many algorithms require computing the distances from a small set of points to
 * many other points. In these scenarios, if the small set of points contains
 * dense vectors - and the large set contains sparse vectors - a large amount of
* unnecessary computation may be done. A {@link DistanceMetric} that implements
* this interface indicates that it supports more efficient computation of the
* distances in these scenarios. <br>
 * A distance metric whose standard computation already handles dense to sparse
 * distances efficiently has no reason to implement this interface.
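 * <br><br>
 * The intended usage pattern is sketched below; {@code denseQuery} and
 * {@code manySparseVecs} are illustrative names:
 * <pre>{@code
 * DenseSparseMetric dm = new ManhattanDistance();
 * double constant = dm.getVectorConstant(denseQuery);
 * for (Vec sparseTarget : manySparseVecs)
 * {
 *     double d = dm.dist(constant, denseQuery, sparseTarget);
 * }
 * }</pre>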
*
* @author Edward Raff
*/
public interface DenseSparseMetric extends DistanceMetric
{
/**
* Computes a summary constant value for the vector that is based on the
* distance metric in use. This value will be used to perform efficient
* dense to sparse computations.
*
* @param vec the vector that will be used in many distance computations
* @return the summary value for the vector
*/
public double getVectorConstant(Vec vec);
/**
* Efficiently computes the distance from one main vector that is used many
     * times, to some sparse target vector. If the target vector does not return
* true for {@link Vec#isSparse() }, the distance will be calculated using
* {@link #dist(jsat.linear.Vec, jsat.linear.Vec) } instead.
*
* @param summaryConst the summary constant for the main vector obtained
* with {@link #getVectorConstant(jsat.linear.Vec) }
* @param main the main vector the summary constant is for
* @param target the target vector to compute the distance to
* @return the distance between the two vectors dist(main, target)
*/
public double dist(double summaryConst, Vec main, Vec target);
}
| 1,861 | 42.302326 | 81 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/DistanceCounter.java | /*
* Copyright (C) 2017 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.distancemetrics;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import jsat.linear.Vec;
/**
* This class exists primarily as a sanity/benchmarking utility. It takes a
* given base distance metric, which will be used as the actual method of
* measuring distances. This class will count how many times a distance
* calculation was queried. This class is thread safe. <br>
* NOTE: all clones of this object will share the same counter.
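 * <br><br>
 * A minimal sketch of the intended use ({@code a} and {@code b} are any two
 * vectors):
 * <pre>{@code
 * DistanceCounter dc = new DistanceCounter(new EuclideanDistance());
 * dc.dist(a, b);
 * dc.dist(b, a);
 * long calls = dc.getCallCount(); //2
 * }</pre>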
*
* @author Edward Raff <[email protected]>
*/
public class DistanceCounter implements DistanceMetric
{
private DistanceMetric base;
private AtomicLong counter;
/**
* Creates a new distance counter to wrap the given base metric
* @param base the base distance measure to use
*/
public DistanceCounter(DistanceMetric base)
{
this.base = base;
this.counter = new AtomicLong();
}
/**
* Copies the given distance counter, while sharing the same underlying
* counter between the original and this new object.
*
* @param toCopy the object to get a copy of
*/
public DistanceCounter(DistanceCounter toCopy)
{
this.base = toCopy.base.clone();
this.counter = toCopy.counter;
}
/**
*
* @return the number of distance calls that have occurred
*/
public long getCallCount()
{
return counter.get();
}
/**
* Resets the distance counter calls to zero.
*/
public void resetCounter()
{
counter.set(0);
}
@Override
public double dist(Vec a, Vec b)
{
counter.incrementAndGet();
return base.dist(a, b);
}
@Override
public boolean isSymmetric()
{
return base.isSymmetric();
}
@Override
public boolean isSubadditive()
{
return base.isSubadditive();
}
@Override
public boolean isIndiscemible()
{
return base.isIndiscemible();
}
@Override
public double metricBound()
{
return base.metricBound();
}
@Override
public boolean supportsAcceleration()
{
return base.supportsAcceleration();
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> vecs, boolean parallel)
{
return base.getAccelerationCache(vecs, parallel);
}
@Override
public double dist(int a, int b, List<? extends Vec> vecs, List<Double> cache)
{
counter.incrementAndGet();
return base.dist(a, b, vecs, cache);
}
@Override
public double dist(int a, Vec b, List<? extends Vec> vecs, List<Double> cache)
{
counter.incrementAndGet();
return base.dist(a, b, vecs, cache);
}
@Override
public List<Double> getQueryInfo(Vec q)
{
return base.getQueryInfo(q);
}
@Override
public double dist(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
counter.incrementAndGet();
return base.dist(a, b, qi, vecs, cache);
}
@Override
public DistanceCounter clone()
{
return new DistanceCounter(this);
}
}
| 3,904 | 24.522876 | 99 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/DistanceMetric.java |
package jsat.linear.distancemetrics;
import java.io.Serializable;
import java.util.List;
import jsat.linear.Vec;
/**
* A distance metric defines the distance between two points in a metric space.
* There are three necessary properties for a metric to be valid,
 * {@link #isSymmetric() symmetry}, {@link #isIndiscemible() indiscernibility},
 * and the {@link #isSubadditive() triangle inequality}. A metric that does
 * not meet all (or any) of these properties is called a pseudo-metric. Many
* learning algorithms rely on these properties to accelerate computations,
* though may not need all the properties to hold.
* <br><br>
* A metric may support the use of a list of pre-computed information to
* accelerate distance computations between points, which can be checked using
* the {@link #supportsAcceleration() } method. The associated methods are
* defined such that the cache calls can be used in a seamless way that will
* automatically invoke the caching behavior when supported. Simply initiate
* with <br>
* {@code List<Double> distCache = dm.getAccelerationCache(vecList);} <br>
 * to create the cache; if not supported, null will be returned, which is
* allowed when calling<br>
* {@code double dist = dm.dist(indx1, indx2, vecList, distCache);}<br>
* Null is used as a special case for {@code distCache}, at which point the
* implementation will call the standard
* {@link #dist(jsat.linear.Vec, jsat.linear.Vec) } using the list and indices.
* The other cache accelerated methods behave in the same way, including
* {@link #getQueryInfo(jsat.linear.Vec) }<br>
* Using this set up, no branching or special case code is necessary to
* automatically use the acceleration capabilities of supported distance metrics.
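 * <br><br>
 * Putting it together as one sketch, where {@code vecList} stands in for any
 * list of vectors:
 * <pre>{@code
 * DistanceMetric dm = new EuclideanDistance();
 * List<Double> distCache = dm.getAccelerationCache(vecList); //may be null
 * double d = dm.dist(0, 1, vecList, distCache); //accelerated when supported
 * }</pre>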
*
* @author Edward Raff
*/
public interface DistanceMetric extends Cloneable, Serializable
{
/**
* Computes the distance between 2 vectors.
     * The smaller the value, the closer, and therefore more similar, the
     * vectors are. 0 indicates the vectors are the same.
*
* @param a the first vector
* @param b the second vector
* @return the distance between them
*/
public double dist(Vec a, Vec b);
/**
     * Returns true if this distance metric obeys the rule that, for any x and y ∈ S <br>
* d(x, y) = d(y, x)
*
* @return true if this distance metric is symmetric, false if it is not
*/
public boolean isSymmetric();
/**
* Returns true if this distance metric obeys the rule that, for any x, y, and z ∈ S <br>
* d(x, z) ≤ d(x, y) + d(y, z)
*
* @return true if this distance metric supports the triangle inequality, false if it does not.
*/
public boolean isSubadditive();
/**
* Returns true if this distance metric obeys the rule that, for any x and y ∈ S <br>
* d(x, y) = 0 if and only if x = y
     * @return true if this distance metric is indiscernible, false otherwise.
*/
public boolean isIndiscemible();
/**
* Returns true if this distance method obeys all the rules required to be a valid metric.
* @return true if this distance method obeys all the rules required to be a valid metric.
*/
default public boolean isValidMetric()
{
return isSymmetric() && isSubadditive() && isIndiscemible();
}
/**
* All metrics must return values greater than or equal to 0.
* The upper bound on the value returned is different for
* different metrics. This method returns the theoretical
* maximal value that could be returned by this distance
* metric. That means {@link Double#POSITIVE_INFINITY }
* is a valid return value.
*
     * @return the maximal distance that could exist between any two points under this distance metric.
*/
public double metricBound();
/**
* Indicates if this distance metric supports building an acceleration cache
* using the {@link #getAccelerationCache(java.util.List) } and associated
* distance methods. By default this method will return {@code false}. If
* {@code true}, then a cache can be obtained from this distance metric and
* used in conjunction with {@link #dist(int, jsat.linear.Vec,
* java.util.List, java.util.List) } and {@link #dist(int, int,
* java.util.List, java.util.List) } to perform distance computations.
* @return {@code true} if cache acceleration is supported for this metric,
* {@code false} otherwise.
*/
default public boolean supportsAcceleration()
{
return false;
}
/**
* Returns a cache of double values associated with the given list of
* vectors in the given order. This can be used by the distance metric to
     * reduce runtime at the cost of memory. This is an optional method.
* <br> If this metric does not support acceleration, {@code null} will be
* returned.
*
* @param vecs the list of vectors to build an acceleration cache for
* @return the list of double for the cache
*/
default public List<Double> getAccelerationCache(List<? extends Vec> vecs)
{
return getAccelerationCache(vecs, false);
}
/**
* Returns a cache of double values associated with the given list of
* vectors in the given order. This can be used by the distance metric to
     * reduce runtime at the cost of memory. This is an optional method.
* <br> If this metric does not support acceleration, {@code null} will be
* returned.
*
* @param vecs the list of vectors to build an acceleration cache for
* @param parallel {@code true} if multiple threads should be used to
* perform clustering. {@code false} if it should be done in a single
* threaded manner.
* @return the list of double for the cache
*/
default public List<Double> getAccelerationCache(List<? extends Vec> vecs, boolean parallel)
{
return null;
}
/**
* Computes the distance between 2 vectors in the original list of vectors.
* <br> If the cache input is {@code null}, then
* {@link #dist(jsat.linear.Vec, jsat.linear.Vec) } will be called directly.
* @param a the index of the first vector
* @param b the index of the second vector
* @param vecs the list of vectors used to build the cache
* @param cache the cache associated with the given list of vectors
* @return the distance between the two vectors
*/
default public double dist(int a, int b, List<? extends Vec> vecs, List<Double> cache)
{
return dist(vecs.get(a), vecs.get(b));
}
/**
* Computes the distance between 2 vectors from two different lists of
* vectors.<br> If the cache input is {@code null}, then
* {@link #dist(jsat.linear.Vec, jsat.linear.Vec)} will be called directly.
*
* @param a the index of the first vector from the first list
* @param b the index of the second vector from the second list
* @param vecs_a the first list of vectors used to build the cache
* @param cache_a the cache associated with the first list of vectors
* @param vecs_b the second list of vectors used to build the cache
* @param cache_b the cache associated with the second list of vectors
* @return the distance between the two vectors
*/
default public double dist(int a, int b, List<? extends Vec> vecs_a, List<Double> cache_a, List<? extends Vec> vecs_b, List<Double> cache_b)
{
Vec b_vec = vecs_b.get(b);
List<Double> b_qi = null;
if(cache_b != null)
{
int factor = cache_b.size()/vecs_b.size();
b_qi = cache_b.subList(b*factor, (b+1)*factor);
}
return dist(a, b_vec, b_qi, vecs_a, cache_a);
}
/**
* Computes the distance between one vector in the original list of vectors
* with that of another vector not from the original list.
* <br> If the cache input is {@code null}, then
* {@link #dist(jsat.linear.Vec, jsat.linear.Vec) } will be called directly.
* @param a the index of the vector in the cache
* @param b the other vector
* @param vecs the list of vectors used to build the cache
* @param cache the cache associated with the given list of vectors
* @return the distance between the two vectors
*/
default public double dist(int a, Vec b, List<? extends Vec> vecs, List<Double> cache)
{
return dist(a, b, getQueryInfo(b), vecs, cache);
}
/**
* Pre computes query information that would have be generated if the query
* was a member of the original list of vectors when calling
* {@link #getAccelerationCache(java.util.List) } . This can then be used if
* a large number of distance computations are going to be done against
     * points in the original set for a point that is not in the original set.
* <br><br>
* If this metric does not support acceleration, {@code null} will be
* returned.
*
* @param q the query point to generate cache information for
* @return the cache information for the query point
*/
default public List<Double> getQueryInfo(Vec q)
{
return null;
}
/**
* Computes the distance between one vector in the original list of vectors
* with that of another vector not from the original list, but had
* information generated by {@link #getQueryInfo(jsat.linear.Vec) }.
* <br> If the cache input is {@code null}, then
* {@link #dist(jsat.linear.Vec, jsat.linear.Vec) } will be called directly.
* @param a the index of the vector in the cache
* @param b the other vector
* @param qi the query information about b
* @param vecs the list of vectors used to build the cache
* @param cache the cache associated with the given list of vectors
* @return the distance between the two vectors
*/
default public double dist(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
return dist(vecs.get(a), b);
}
/**
* Returns a descriptive name of the Distance Metric in use
* @return the name of this metric
*/
@Override
public String toString();
public DistanceMetric clone();
}
| 10,439 | 41.612245 | 145 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/EuclideanDistance.java |
package jsat.linear.distancemetrics;
import java.util.List;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
import jsat.utils.DoubleList;
import jsat.utils.concurrent.ParallelUtils;
/**
* Euclidean Distance is the L<sub>2</sub> norm.
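 * That is, d(x, y) = √(Σ<sub>i</sub> (x<sub>i</sub> - y<sub>i</sub>)<sup>2</sup>).
 * For example, the distance between (0, 0) and (3, 4) is √(3<sup>2</sup> + 4<sup>2</sup>) = 5.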
*
* @author Edward Raff
*/
public class EuclideanDistance implements DenseSparseMetric
{
private static final long serialVersionUID = 8155062933851345574L;
@Override
public double dist(Vec a, Vec b)
{
return a.pNormDist(2, b);
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String toString()
{
return "Euclidean Distance";
}
@Override
public EuclideanDistance clone()
{
return new EuclideanDistance();
}
@Override
public double getVectorConstant(Vec vec)
{
        /* Returns the sum of squared differences if the other vec had been all
         * zeros. That means this is one sqrt away from being the Euclidean
* distance to the zero vector.
*/
return Math.pow(vec.pNorm(2), 2.0);
}
@Override
public double dist(double summaryConst, Vec main, Vec target)
{
if(!target.isSparse())
return dist(main, target);
/**
* Summary contains the squared differences to the zero vec, only a few
* of the indices are actually non zero - we correct those values
*/
double addBack = 0.0;
double takeOut = 0.0;
for(IndexValue iv : target)
{
int i = iv.getIndex();
double mainVal = main.get(i);
            takeOut += mainVal*mainVal;
            addBack += Math.pow(mainVal-iv.getValue(), 2.0);
}
        return Math.sqrt(Math.max(summaryConst-takeOut+addBack, 0));//max in case of numerical issues
}
@Override
public boolean supportsAcceleration()
{
return true;
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> vecs, boolean parallel)
{
        //Store the squared norms in the cache
double[] cache = new double[vecs.size()];
ParallelUtils.run(parallel, vecs.size(), (start, end) ->
{
for(int i = start; i < end; i++)
{
Vec v = vecs.get(i);
cache[i] = v.dot(v);
}
});
return DoubleList.view(cache, vecs.size());
}
@Override
public double dist(int a, int b, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), vecs.get(b));
        return Math.sqrt(Math.max(cache.get(a)+cache.get(b)-2*vecs.get(a).dot(vecs.get(b)), 0));//max in case of numerical issues
}
@Override
public double dist(int a, Vec b, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), b);
        return Math.sqrt(Math.max(cache.get(a)+b.dot(b)-2*vecs.get(a).dot(b), 0));//max in case of numerical issues
}
@Override
public List<Double> getQueryInfo(Vec q)
{
DoubleList qi = new DoubleList(1);
qi.add(q.dot(q));
return qi;
}
@Override
public double dist(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), b);
        return Math.sqrt(Math.max(cache.get(a)+qi.get(0)-2*vecs.get(a).dot(b), 0));//max in case of numerical issues
}
}
| 4,046 | 24.77707 | 128 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/JaccardDistance.java | /*
* Copyright (C) 2017 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.distancemetrics;
import java.util.Iterator;
import java.util.List;
import jsat.distributions.kernels.KernelTrick;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
import static java.lang.Math.*;
/**
* This class implements both the weighted Jaccard Distance and the standard
 * Jaccard distance. If an input is given with only binary 0 or 1 values, the
* weighted Jaccard is equivalent to the un-weighted version.<br>
* For the weighted Jaccard version, all values less than or equal to zero will
* be treated as zero. For the unweighted versions, all non-zero values will
* behave as if their value is 1.0.
* <br>
 * The Jaccard Distance and similarity are intertwined, and so this class is
* both a distance metric and kernel trick.
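 * <br><br>
 * A small worked sketch of the weighted version (using
 * {@link jsat.linear.DenseVector} for brevity): for a = (1, 2, 0) and
 * b = (2, 1, 1), the similarity is
 * (min(1,2)+min(2,1)+min(0,1)) / (max(1,2)+max(2,1)+max(0,1)) = 2/5, so the
 * distance is 1 - 2/5 = 0.6.
 * <pre>{@code
 * JaccardDistance jd = new JaccardDistance(true);
 * double d = jd.dist(new DenseVector(new double[]{1, 2, 0}),
 *                    new DenseVector(new double[]{2, 1, 1})); //0.6
 * }</pre>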
*
* @author Edward Raff
*/
public class JaccardDistance implements DistanceMetric, KernelTrick
{
private boolean weighted;
/**
* Creates a new Jaccard similarity, which can be weighted or unweighted.
*
* @param weighted {@code true} to use the weighted Jaccard, {@code false}
* otherwise.
*/
public JaccardDistance(boolean weighted)
{
this.weighted = weighted;
}
/**
* Creates a new Weighted Jaccard distance / similarity
*/
public JaccardDistance()
{
this(true);
}
@Override
public double dist(Vec a, Vec b)
{
return 1-eval(a, b);
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return 1.0;
}
@Override
public boolean supportsAcceleration()
{
return false;
}
@Override
public List<Double> getQueryInfo(Vec q)
{
return null;
}
@Override
public double eval(Vec a, Vec b)
{
double numer = 0, denom = 0;
Iterator<IndexValue> a_iter = a.getNonZeroIterator();
Iterator<IndexValue> b_iter = b.getNonZeroIterator();
IndexValue a_val = a_iter.hasNext() ? a_iter.next() : null;
IndexValue b_val = b_iter.hasNext() ? b_iter.next() : null;
while (a_val != null && b_val != null)
{
if (weighted)
{
if (a_val.getIndex() == b_val.getIndex())
{
numer += max(min(a_val.getValue(), b_val.getValue()), 0.0);
denom += max(max(a_val.getValue(), b_val.getValue()), 0.0);
a_val = a_iter.hasNext() ? a_iter.next() : null;
b_val = b_iter.hasNext() ? b_iter.next() : null;
}
else if(a_val.getIndex() < b_val.getIndex())
{
denom += max(a_val.getValue(), 0.0);
a_val = a_iter.hasNext() ? a_iter.next() : null;
}
else//b had a lower index
{
denom += max(b_val.getValue(), 0.0);
b_val = b_iter.hasNext() ? b_iter.next() : null;
}
}
else//unweighted variant
{
if (a_val.getIndex() == b_val.getIndex())
{
numer++;
denom++;
a_val = a_iter.hasNext() ? a_iter.next() : null;
b_val = b_iter.hasNext() ? b_iter.next() : null;
}
else if(a_val.getIndex() < b_val.getIndex())
{
denom++;
a_val = a_iter.hasNext() ? a_iter.next() : null;
}
else//b had a lower index
{
denom++;
b_val = b_iter.hasNext() ? b_iter.next() : null;
}
}
}
        //catch stragglers
Iterator<IndexValue> finalIter = a_val != null ? a_iter : b_iter;
IndexValue finalVal = a_val != null ? a_val : b_val;
while(finalVal != null)
{
if(weighted)
denom += max(finalVal.getValue(), 0.0);
else
denom++;
finalVal = finalIter.hasNext() ? finalIter.next() : null;
}
return numer / denom;
}
@Override
public JaccardDistance clone()
{
return new JaccardDistance(weighted);
}
@Override
public void addToCache(Vec newVec, List<Double> cache)
{
//NOP, nothing to do
}
@Override
public double eval(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
return eval(vecs.get(a), b);
}
@Override
public double eval(int a, int b, List<? extends Vec> trainingSet, List<Double> cache)
{
return eval(trainingSet.get(a), trainingSet.get(b));
}
@Override
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, int start, int end)
{
return evalSum(finalSet, cache, alpha, y, getQueryInfo(y), start, end);
}
@Override
public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, List<Double> qi, int start, int end)
{
double sum = 0;
for(int i = start; i < end; i++)
if(alpha[i] != 0)
sum += alpha[i] * eval(i, y, qi, finalSet, cache);
return sum;
}
@Override
public boolean normalized()
{
return true;
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> trainingSet)
{
return null;
}
}
| 6,622 | 27.670996 | 135 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/KernelDistance.java |
package jsat.linear.distancemetrics;
import java.util.List;
import jsat.classifiers.knn.NearestNeighbour;
import jsat.distributions.kernels.*;
import jsat.linear.Vec;
/**
* Creates a distance metric from a given kernel trick.
* For the distance metric to be valid, the kernel used
* must be positive definite.
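 * <br><br>
 * A sketch of use; note that {@link #dist(jsat.linear.Vec, jsat.linear.Vec) }
 * returns the squared kernel-induced distance:
 * <pre>{@code
 * DistanceMetric dm = new KernelDistance(new RBFKernel(1.0));
 * double dSqrd = dm.dist(a, b); //K(a,a) - 2*K(a,b) + K(b,b)
 * }</pre>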
*
* @author Edward Raff
*/
public class KernelDistance implements DistanceMetric
{
private static final long serialVersionUID = -1553315486668768024L;
private KernelTrick kf;
/**
     * Creates a distance metric from the given kernel. For the metric to be valid, the kernel must be positive definite. This means that
* <br><br>
* ∀ c<sub>i</sub> ∈ ℜ , x<sub>i</sub> ∈ ℜ<sup>d</sup> <br>
* ∑<sub>i, j = 1</sub><sup>m</sup> c<sub>i</sub> c<sub>j</sub> K(x<sub>i</sub>, x<sub>j</sub>) ≥ 0
     * @param kf the positive definite kernel to use
*/
public KernelDistance(KernelTrick kf)
{
this.kf = kf;
}
/**
* Returns the square of the distance function expanded as kernel methods.
* <br>
* d<sup>2</sup>(x,y) = K(x,x) - 2*K(x,y) + K(y,y)
*
* <br><br>
* Special Notes:<br>
* The use of {@link RBFKernel} or {@link PolynomialKernel} of degree 1
* in the {@link NearestNeighbour} classifier will degenerate into the
* normal nearest neighbor algorithm.
*
* @param a the first vector
* @param b the second vector
* @return the distance metric based on a kernel function
*/
@Override
public double dist(Vec a, Vec b)
{
return kf.eval(a, a) - 2*kf.eval(a, b) + kf.eval(b, b);
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String toString()
{
return "Kernel (" + kf + ") Distance";
}
@Override
public KernelDistance clone()
{
return new KernelDistance(kf);
}
    //TODO modify CacheAcceleratedKernel to work with lists...
@Override
public boolean supportsAcceleration()
{
return false;
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> vecs, boolean parallel)
{
return null;
}
@Override
public double dist(int a, int b, List<? extends Vec> vecs, List<Double> cache)
{
return dist(vecs.get(a), vecs.get(b));
}
@Override
public double dist(int a, Vec b, List<? extends Vec> vecs, List<Double> cache)
{
return dist(vecs.get(a), b);
}
@Override
public List<Double> getQueryInfo(Vec q)
{
return null;
}
@Override
public double dist(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
return dist(vecs.get(a), b);
}
}
| 3,069 | 22.615385 | 136 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/MahalanobisDistance.java |
package jsat.linear.distancemetrics;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.linear.*;
import jsat.regression.RegressionDataSet;
import jsat.utils.concurrent.ParallelUtils;
/**
 * The Mahalanobis Distance is a metric that takes into account the variance of the data. This requires
 * training the metric on the data set whose variance is to be learned. The extra work involved adds
* computation time to training and prediction. However, improvements in accuracy can be obtained for
* many data sets. At the same time, the Mahalanobis Distance can also be detrimental to accuracy.
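 * <br><br>
 * The distance computed is d(x, y) = √((x-y)<sup>T</sup> S<sup>-1</sup> (x-y)),
 * where S is the covariance matrix estimated from the training data. A sketch
 * of typical use:
 * <pre>{@code
 * MahalanobisDistance md = new MahalanobisDistance();
 * md.train(dataSet); //the covariance structure must be learned first
 * double d = md.dist(a, b);
 * }</pre>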
*
* @author Edward Raff
*/
public class MahalanobisDistance extends TrainableDistanceMetric
{
private static final long serialVersionUID = 7878528119699276817L;
private boolean reTrain;
/**
* The inverse of the covariance matrix
*/
private Matrix S;
public MahalanobisDistance()
{
reTrain = true;
}
/**
* Returns <tt>true</tt> if this metric will indicate a need to be retrained
* once it has been trained once. This will mean {@link #needsTraining() }
* will always return true. <tt>false</tt> means the metric will not indicate
* a need to be retrained once it has been trained once.
*
* @return <tt>true</tt> if the data should always be retrained, <tt>false</tt> if it should not.
*/
public boolean isReTrain()
{
return reTrain;
}
/**
* It may be desirable to have the metric trained only once, and use the same parameters
* for all other training sessions of the learning algorithm using the metric. This can
* be controlled through this boolean. Setting <tt>true</tt> if this metric will indicate
* a need to be retrained once it has been trained once. This will mean {@link #needsTraining() }
* will always return true. <tt>false</tt> means the metric will not indicate
* a need to be retrained once it has been trained once.
*
* @param reTrain <tt>true</tt> to make the metric always request retraining, <tt>false</tt> so it will not.
*/
public void setReTrain(boolean reTrain)
{
this.reTrain = reTrain;
}
/**
* Sets the Inverse Covariance Matrix used as the distance matrix by this
* distance metric.
*
* @param S the matrix to use as the distance matrix
*/
public void setInverseCovariance(Matrix S)
{
this.S = S;
}
@Override
public <V extends Vec> void train(List<V> dataSet)
{
train(dataSet, false);
}
@Override
public <V extends Vec> void train(List<V> dataSet, boolean parallel)
{
Vec mean = MatrixStatistics.meanVector(dataSet);
Matrix covariance = MatrixStatistics.covarianceMatrix(mean, dataSet);
LUPDecomposition lup;
SingularValueDecomposition svd;
if(parallel)
lup = new LUPDecomposition(covariance.clone(), ParallelUtils.CACHED_THREAD_POOL);
else
lup = new LUPDecomposition(covariance.clone());
double det = lup.det();
        if(Double.isNaN(det) || Double.isInfinite(det) || Math.abs(det) <= 1e-13)//ill-conditioned, fall back to the SVD's pseudo-inverse
{
lup = null;
svd = new SingularValueDecomposition(covariance);
S = svd.getPseudoInverse();
}
else if(parallel)
S = lup.solve(Matrix.eye(covariance.cols()), ParallelUtils.CACHED_THREAD_POOL);
else
S = lup.solve(Matrix.eye(covariance.cols()));
}
@Override
public void train(DataSet dataSet)
{
train(dataSet, false);
}
@Override
public void train(DataSet dataSet, boolean parallel)
{
train(dataSet.getDataVectors(), parallel);
}
@Override
public void train(ClassificationDataSet dataSet)
{
train( (DataSet) dataSet);
}
@Override
public void train(ClassificationDataSet dataSet, boolean parallel)
{
train((DataSet) dataSet, parallel);
}
@Override
public boolean supportsClassificationTraining()
{
return true;
}
@Override
public void train(RegressionDataSet dataSet)
{
train( (DataSet) dataSet);
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
train((DataSet) dataSet, parallel);
}
@Override
public boolean supportsRegressionTraining()
{
return true;
}
@Override
public boolean needsTraining()
{
if(S == null)
return true;
else
return isReTrain();
}
@Override
public double dist(Vec a, Vec b)
{
Vec aMb = a.subtract(b);
return Math.sqrt(aMb.dot(S.multiply(aMb)));
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String toString()
{
return "Mahalanobis Distance";
}
@Override
public MahalanobisDistance clone()
{
MahalanobisDistance clone = new MahalanobisDistance();
clone.reTrain = this.reTrain;
if(this.S != null)
clone.S = this.S.clone();
return clone;
}
}
| 5,570 | 25.783654 | 115 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/ManhattanDistance.java |
package jsat.linear.distancemetrics;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
/**
* Manhattan Distance is the L<sub>1</sub> norm.
*
* @author Edward Raff
*/
public class ManhattanDistance implements DenseSparseMetric
{
private static final long serialVersionUID = 3028834823742743351L;
@Override
public double dist(Vec a, Vec b)
{
return a.pNormDist(1, b);
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String toString()
{
return "Manhattan Distance";
}
@Override
public ManhattanDistance clone()
{
return new ManhattanDistance();
}
@Override
public double getVectorConstant(Vec vec)
{
return vec.pNorm(1);
}
@Override
public double dist(double summaryConst, Vec main, Vec target)
{
if(!target.isSparse())
return dist(main, target);
/**
* Summary contains the differences to the zero vec, only a few
* of the indices are actually non zero - we correct those values
*/
double takeOut = 0.0;
for(IndexValue iv : target)
{
int i = iv.getIndex();
double mainVal = main.get(i);
            takeOut += Math.abs(mainVal)-Math.abs(mainVal-iv.getValue());
}
return summaryConst-takeOut;
}
}
| 1,731 | 19.139535 | 74 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/MinkowskiDistance.java | package jsat.linear.distancemetrics;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
/**
* Minkowski Distance is the L<sub>p</sub> norm.
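 * That is, d(x, y) = (Σ<sub>i</sub> |x<sub>i</sub> - y<sub>i</sub>|<sup>p</sup>)<sup>1/p</sup>.
 * For example, with p = 3 the distance between (0, 0) and (1, 2) is
 * (1<sup>3</sup> + 2<sup>3</sup>)<sup>1/3</sup> = 9<sup>1/3</sup> ≈ 2.08.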
*
* @author Edward Raff
*/
public class MinkowskiDistance implements DenseSparseMetric
{
private static final long serialVersionUID = 8976696315441171045L;
private double p;
/**
*
* @param p the norm to use as the distance
*/
public MinkowskiDistance(double p)
{
if (p <= 0 || Double.isNaN(p))
throw new ArithmeticException("The pNorm exists only for p > 0");
else if (Double.isInfinite(p))
throw new ArithmeticException("Infinity norm is a special case, use ChebyshevDistance for infinity norm");
setP(p);
}
/**
*
* @param p the norm to use for this metric
*/
public void setP(double p)
{
if(p <= 0 || Double.isNaN(p) || Double.isInfinite(p))
throw new IllegalArgumentException("p must be a positive value, not " + p);
this.p = p;
}
/**
*
* @return the norm to use for this metric.
*/
public double getP()
{
return p;
}
@Override
public double dist(Vec a, Vec b)
{
return a.pNormDist(p, b);
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String toString()
{
return "Minkowski Distance (p=" + p + ")";
}
@Override
public MinkowskiDistance clone()
{
return new MinkowskiDistance(p);
}
@Override
public double getVectorConstant(Vec vec)
{
return Math.pow(vec.pNorm(p), p);
}
@Override
public double dist(double summaryConst, Vec main, Vec target)
{
if(!target.isSparse())
return dist(main, target);
/**
* Summary contains the differences^p to the zero vec, only a few
* of the indices are actually non zero - we correct those values
*/
double addBack = 0.0;
double takeOut = 0.0;
for(IndexValue iv : target)
{
int i = iv.getIndex();
double mainVal = main.get(i);
            takeOut += Math.pow(Math.abs(mainVal), p);
            addBack += Math.pow(Math.abs(mainVal-iv.getValue()), p);
}
return Math.pow(summaryConst-takeOut+addBack, 1/p);
}
}
| 2,711 | 21.229508 | 118 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/NormalizedEuclideanDistance.java | package jsat.linear.distancemetrics;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.datatransform.UnitVarianceTransform;
import jsat.linear.DenseVector;
import jsat.linear.MatrixStatistics;
import jsat.linear.Vec;
import jsat.linear.VecOps;
import jsat.regression.RegressionDataSet;
import jsat.utils.DoubleList;
import jsat.utils.concurrent.ParallelUtils;
/**
* Implementation of the Normalized Euclidean Distance Metric. The normalized
* version divides each variable by its standard deviation, and then continues
* as the normal {@link EuclideanDistance}. <br>
* The same results can be achieved by first applying
* {@link UnitVarianceTransform} to a data set before using the
* L2 norm.<br>
* It is equivalent to the {@link MahalanobisDistance} if only the diagonal
* values were used.
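 * <br><br>
 * A sketch of typical use:
 * <pre>{@code
 * NormalizedEuclideanDistance nd = new NormalizedEuclideanDistance();
 * nd.train(dataSet); //the per-feature scaling must be learned first
 * double d = nd.dist(a, b);
 * }</pre>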
*
*
* @author Edward Raff
*/
public class NormalizedEuclideanDistance extends TrainableDistanceMetric
{
private static final long serialVersionUID = 210109457671623688L;
private Vec invStndDevs;
/**
* Creates a new Normalized Euclidean distance metric
*/
public NormalizedEuclideanDistance()
{
}
@Override
public <V extends Vec> void train(List<V> dataSet)
{
Vec mean = MatrixStatistics.meanVector(dataSet);
invStndDevs = new DenseVector(mean.length());
MatrixStatistics.covarianceDiag(mean, invStndDevs , dataSet);
invStndDevs.applyFunction((x)->x*x);
invStndDevs.applyFunction((x)->1/x);
}
@Override
public <V extends Vec> void train(List<V> dataSet, boolean parallel)
{
train(dataSet);
}
@Override
public void train(DataSet dataSet)
{
invStndDevs = dataSet.getColumnMeanVariance()[1];
invStndDevs.applyFunction((x)->x*x);
invStndDevs.applyFunction((x)->1/x);
}
@Override
public void train(DataSet dataSet, boolean parallel)
{
train(dataSet);
}
@Override
public void train(ClassificationDataSet dataSet)
{
train((DataSet)dataSet);
}
@Override
public void train(ClassificationDataSet dataSet, boolean parallel)
{
train(dataSet);
}
@Override
public boolean supportsClassificationTraining()
{
return true;
}
@Override
public void train(RegressionDataSet dataSet)
{
train((DataSet)dataSet);
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
train(dataSet);
}
@Override
public boolean supportsRegressionTraining()
{
return true;
}
@Override
public boolean needsTraining()
{
return invStndDevs == null;
}
@Override
public NormalizedEuclideanDistance clone()
{
NormalizedEuclideanDistance clone = new NormalizedEuclideanDistance();
if(this.invStndDevs != null)
clone.invStndDevs = this.invStndDevs.clone();
return clone;
}
@Override
public double dist(Vec a, Vec b)
{
double r = VecOps.accumulateSum(invStndDevs, a, b, (double x) -> x*x);
return Math.sqrt(r);
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return Double.POSITIVE_INFINITY;
}
/*
* TODO when moving to java8, convert TrainableDistanceMetric into an
* interface, fix this class up. Then extend WeightedEuclideanDistance
*/
@Override
public boolean supportsAcceleration()
{
return true;
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> vecs, boolean parallel)
{
        //Store the weighted squared norms in the cache
double[] cache = new double[vecs.size()];
ParallelUtils.run(parallel, vecs.size(), (start, end) ->
{
for(int i = start; i < end; i++)
{
Vec v = vecs.get(i);
cache[i] = VecOps.weightedDot(invStndDevs, v, v);
}
});
return DoubleList.view(cache, vecs.size());
}
@Override
public double dist(int a, int b, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), vecs.get(b));
return Math.sqrt(cache.get(a)+cache.get(b)-2*VecOps.weightedDot(invStndDevs, vecs.get(a), vecs.get(b)));
}
@Override
public double dist(int a, Vec b, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), b);
return Math.sqrt(cache.get(a)+VecOps.weightedDot(invStndDevs, b, b)-2*VecOps.weightedDot(invStndDevs, vecs.get(a), b));
}
@Override
public List<Double> getQueryInfo(Vec q)
{
DoubleList qi = new DoubleList(1);
qi.add(VecOps.weightedDot(invStndDevs, q, q));
return qi;
}
@Override
public double dist(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), b);
return Math.sqrt(cache.get(a)+qi.get(0)-2*VecOps.weightedDot(invStndDevs, vecs.get(a), b));
}
}
| 5,665 | 24.522523 | 127 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/PearsonDistance.java |
package jsat.linear.distancemetrics;
import java.util.Iterator;
import java.util.List;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
/**
* A valid distance metric formed from the Pearson Correlation between two vectors.
 * The distance is in the range [0, 1].
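 * A correlation r ∈ [-1, 1] is mapped to a distance by d = √((1-r)/2), or by
 * d = √(1-r<sup>2</sup>) when the absolute correlation is used.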
*
* @author Edward Raff
*/
public class PearsonDistance implements DistanceMetric
{
private static final long serialVersionUID = 1090726755301934198L;
private boolean bothNonZero;
private boolean absoluteDistance;
/**
* Creates a new standard Pearson Distance that does not ignore zero values
* and anti-correlated values are considered far away.
*/
public PearsonDistance()
{
this(false, false);
}
/**
* Creates a new Pearson Distance object
     * @param bothNonZero {@code true} if zero values should be treated as
     * "missing" or "no vote", and will not contribute. In this case the mean of
     * each vector is computed from only its non zero values.
     * {@code false} produces the standard Pearson value.
* @param absoluteDistance {@code true} to use the absolute correlation, meaning
* correlated and anti-correlated values will have the same distance.
*/
public PearsonDistance(boolean bothNonZero, boolean absoluteDistance)
{
this.bothNonZero = bothNonZero;
this.absoluteDistance = absoluteDistance;
}
@Override
public double dist(Vec a, Vec b)
{
double r = correlation(a, b, bothNonZero);
if(Double.isNaN(r))
return Double.MAX_VALUE;
if(absoluteDistance)
return Math.sqrt(1-r*r);
else
return Math.sqrt((1-r)*0.5);
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return 1;
}
@Override
public PearsonDistance clone()
{
return new PearsonDistance(bothNonZero, absoluteDistance);
}
/**
* Computes the Pearson correlation between two vectors. If one of the vectors is all zeros, the result is undefined.
* In cases where both are zero vectors, 1 will be returned to indicate they are the same. In cases where one of the
* numerator coefficients is zero, its value will be bumped up to an epsilon to provide a near result. <br>
* <br>
* In cases where {@code bothNonZero} is {@code true}, and the vectors have no overlapping non zero values, 0 will
* be returned.
* @param a the first vector
* @param b the second vector
* @param bothNonZero {@code false} is the normal Pearson correlation. {@code true} will make the computation ignore
* all indexes where one of the values is zero, the mean will be from all non zero values in each vector.
* @return the Pearson correlation in [-1, 1]
*/
public static double correlation(Vec a, Vec b, boolean bothNonZero)
{
final double aMean;
final double bMean;
if(bothNonZero)
{
aMean = a.sum()/a.nnz();
bMean = b.sum()/b.nnz();
}
else
{
aMean = a.mean();
bMean = b.mean();
}
double r = 0;
double aSqrd = 0, bSqrd = 0;
if (a.isSparse() || b.isSparse())
{
Iterator<IndexValue> aIter = a.getNonZeroIterator();
Iterator<IndexValue> bIter = b.getNonZeroIterator();
            //if one is empty, then a zero forms in the denominator
if (!aIter.hasNext() && !bIter.hasNext())
return 1;
if (!aIter.hasNext() || !bIter.hasNext())
return Double.MAX_VALUE;
IndexValue aCur = null;
IndexValue bCur = null;
boolean newA = true, newB = true;
int lastObservedIndex = -1;
do
{
if (newA)
{
if (!aIter.hasNext())
break;
aCur = aIter.next();
newA = false;
}
if (newB)
{
if (!bIter.hasNext())
break;
bCur = bIter.next();
newB = false;
}
if (aCur.getIndex() == bCur.getIndex())
{
//accumulate skipped positions where both are zero
if(!bothNonZero)
r += aMean * bMean * (aCur.getIndex()-lastObservedIndex - 1);
lastObservedIndex = aCur.getIndex();
double aVal = aCur.getValue() - aMean;
double bVal = bCur.getValue() - bMean;
r += aVal * bVal;
aSqrd += aVal * aVal;
bSqrd += bVal * bVal;
newA = newB = true;
}
else if (aCur.getIndex() > bCur.getIndex())
{
if (!bothNonZero)
{
//accumulate skipped positions where both are zero
r += aMean * bMean * (bCur.getIndex()-lastObservedIndex - 1);
lastObservedIndex = bCur.getIndex();
double bVal = bCur.getValue() - bMean;
r += -aMean * bVal;
bSqrd += bVal * bVal;
}
newB = true;
}
else if (aCur.getIndex() < bCur.getIndex())
{
if (!bothNonZero)
{
//accumulate skipped positions where both are zero
r += aMean * bMean * (aCur.getIndex()-lastObservedIndex - 1);
lastObservedIndex = aCur.getIndex();
double aVal = aCur.getValue() - aMean;
r += aVal * -bMean;
aSqrd += aVal * aVal;
}
newA = true;
}
}
while (true);
if (!bothNonZero)
{
                //only one of the loops below will execute
while (!newA || (newA && aIter.hasNext()))
{
if(newA)
aCur = aIter.next();
//accumulate skipped positions where both are zero
r += aMean * bMean * (aCur.getIndex()-lastObservedIndex - 1);
lastObservedIndex = aCur.getIndex();
double aVal = aCur.getValue() - aMean;
r += aVal * -bMean;
aSqrd += aVal * aVal;
newA = true;
}
while (!newB || (newB && bIter.hasNext()))
{
if(newB)
bCur = bIter.next();
//accumulate skipped positions where both are zero
r += aMean * bMean * (bCur.getIndex()-lastObservedIndex - 1);
lastObservedIndex = bCur.getIndex();
double bVal = bCur.getValue() - bMean;
r += -aMean * bVal;
bSqrd += bVal * bVal;
newB = true;
}
r += aMean * bMean * (a.length()-lastObservedIndex - 1);
aSqrd += aMean * aMean * (a.length()-a.nnz());
bSqrd += bMean * bMean * (b.length()-b.nnz());
}
}
else//dense!
{
for(int i = 0; i < a.length(); i++)
{
double aTmp = a.get(i);
double bTmp = b.get(i);
if(bothNonZero && (aTmp == 0 || bTmp == 0))
continue;
double aVal = aTmp-aMean;
double bVal = bTmp-bMean;
r += aVal*bVal;
aSqrd += aVal*aVal;
bSqrd += bVal*bVal;
}
}
if(bSqrd == 0 && aSqrd == 0)
return 0;
else if(bSqrd == 0 || aSqrd == 0)
return r/Math.sqrt((aSqrd+1e-10)*(bSqrd+1e-10));
return r/Math.sqrt(aSqrd*bSqrd);
}
/*
     * TODO Acceleration for Pearson can be done, it's a little complicated (you
     * cache the means and Sqrd values - so that you can do just 1 pass over all
     * values). But that's a good bit of code, and the above needs to be cleaned
* up before implementing that.
*/
}
| 8,789 | 31.798507 | 122 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/SquaredEuclideanDistance.java |
package jsat.linear.distancemetrics;
import java.util.List;
import jsat.linear.SparseVector;
import jsat.linear.Vec;
import jsat.utils.DoubleList;
import jsat.utils.concurrent.ParallelUtils;
/**
* In many applications, the squared {@link EuclideanDistance} is used because it avoids an expensive {@link Math#sqrt(double) } operation.
* However, the Squared Euclidean Distance is not a truly valid metric, as it does not obey the {@link #isSubadditive() triangle inequality}.
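 * For example, for the points 0, 1, and 2 on the real line, d(0, 2) = 4 while
 * d(0, 1) + d(1, 2) = 1 + 1 = 2, so d(0, 2) > d(0, 1) + d(1, 2).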
*
* @author Edward Raff
*/
public class SquaredEuclideanDistance implements DistanceMetric
{
private static final long serialVersionUID = 2966818558802484702L;
@Override
public double dist(Vec a, Vec b)
{
if (a.length() != b.length())
throw new ArithmeticException("Length miss match, vectors must have the same length");
double d = 0;
if( a instanceof SparseVector && b instanceof SparseVector)
{
            //Just square the pNorm for now... not easy code to write, and the sparseness is more important
return Math.pow(a.pNormDist(2, b), 2);
}
else
{
double tmp;
for(int i = 0; i < a.length(); i++)
{
tmp = a.get(i) - b.get(i);
d += tmp*tmp;
}
}
return d;
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return false;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return Double.POSITIVE_INFINITY;
}
@Override
public String toString()
{
return "Squared Euclidean Distance";
}
@Override
public SquaredEuclideanDistance clone()
{
return new SquaredEuclideanDistance();
}
@Override
public boolean supportsAcceleration()
{
return true;
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> vecs, boolean parallel)
{
        //Store the squared norms in the cache
double[] cache = new double[vecs.size()];
ParallelUtils.run(parallel, vecs.size(), (start, end) ->
{
for(int i = start; i < end; i++)
{
Vec v = vecs.get(i);
cache[i] = v.dot(v);
}
});
return DoubleList.view(cache, vecs.size());
}
@Override
public double dist(int a, int b, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), vecs.get(b));
return (cache.get(a)+cache.get(b)-2*vecs.get(a).dot(vecs.get(b)));
}
@Override
public double dist(int a, Vec b, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), b);
return (cache.get(a)+b.dot(b)-2*vecs.get(a).dot(b));
}
@Override
public List<Double> getQueryInfo(Vec q)
{
DoubleList qi = new DoubleList(1);
qi.add(q.dot(q));
return qi;
}
@Override
public double dist(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), b);
return (cache.get(a)+qi.get(0)-2*vecs.get(a).dot(b));
}
}
| 3,684 | 24.413793 | 141 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/TrainableDistanceMetric.java |
package jsat.linear.distancemetrics;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.linear.Vec;
import jsat.regression.RegressionDataSet;
/**
* Some Distance Metrics require information that can be learned from the data set.
* Trainable Distance Metrics support this facility, and algorithms that rely on
 * distance metrics should check if their metric needs training. This is needed
 * because priming the distance metric on the whole data set and then performing cross
* validation would bias the results, as the metric would have been trained on
* the testing set examples.
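 * <br><br>
 * A typical guard before using a possibly-trainable metric (sketch):
 * <pre>{@code
 * TrainableDistanceMetric.trainIfNeeded(dm, dataSet);
 * double d = dm.dist(a, b); //safe once any needed training has been done
 * }</pre>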
*
* @author Edward Raff
*/
abstract public class TrainableDistanceMetric implements DistanceMetric
{
private static final long serialVersionUID = 6356276953152869105L;
/**
* Trains this metric on the given data set
* @param <V> the type of vectors in the list
* @param dataSet the data set to train on
* @throws UnsupportedOperationException if the metric can not be trained from unlabeled data
*/
public <V extends Vec> void train(List<V> dataSet)
{
train(dataSet, false);
}
/**
* Trains this metric on the given data set
* @param <V> the type of vectors in the list
* @param dataSet the data set to train on
* @param parallel {@code true} if multiple threads should be used for
* training. {@code false} if it should be done in a single-threaded manner.
* @throws UnsupportedOperationException if the metric can not be trained from unlabeled data
*/
abstract public <V extends Vec> void train(List<V> dataSet, boolean parallel);
/**
* Trains this metric on the given data set
* @param dataSet the data set to train on
* @throws UnsupportedOperationException if the metric can not be trained from unlabeled data
*/
public void train(DataSet dataSet)
{
train(dataSet, false);
}
/**
* Trains this metric on the given data set
* @param dataSet the data set to train on
* @param parallel {@code true} if multiple threads should be used for
* training. {@code false} if it should be done in a single-threaded manner.
* @throws UnsupportedOperationException if the metric can not be trained from unlabeled data
*/
abstract public void train(DataSet dataSet, boolean parallel);
/**
* Trains this metric on the given classification problem data set
* @param dataSet the data set to train on
* @throws UnsupportedOperationException if the metric can not be trained from classification problems
*/
public void train(ClassificationDataSet dataSet)
{
train(dataSet, false);
}
/**
* Trains this metric on the given classification problem data set
*
* @param dataSet the data set to train on
* @param parallel {@code true} if multiple threads should be used for
* training. {@code false} if it should be done in a single-threaded manner.
* @throws UnsupportedOperationException if the metric can not be trained
* from classification problems
*/
abstract public void train(ClassificationDataSet dataSet, boolean parallel);
/**
* Some metrics might be special purpose, and not trainable for all types of data sets or tasks.
* This method returns <tt>true</tt> if this metric supports training for classification
* problems, and <tt>false</tt> if it does not. <br>
* If a metric can learn from unlabeled data, it must return <tt>true</tt>
* for this method.
*
* @return <tt>true</tt> if this metric supports training for classification
* problems, and <tt>false</tt> if it does not
*/
abstract public boolean supportsClassificationTraining();
/**
* Trains this metric on the given regression problem data set
* @param dataSet the data set to train on
* @throws UnsupportedOperationException if the metric can not be trained from regression problems
*/
abstract public void train(RegressionDataSet dataSet);
/**
* Trains this metric on the given regression problem data set
* @param dataSet the data set to train on
* @param parallel {@code true} if multiple threads should be used for
* training. {@code false} if it should be done in a single-threaded manner.
* @throws UnsupportedOperationException if the metric can not be trained from regression problems
*/
abstract public void train(RegressionDataSet dataSet, boolean parallel);
/**
     * Some metrics might be special purpose, and not trainable for all types of data sets or tasks.
* This method returns <tt>true</tt> if this metric supports training for regression
* problems, and <tt>false</tt> if it does not. <br>
* If a metric can learn from unlabeled data, it must return <tt>true</tt>
* for this method.
*
* @return <tt>true</tt> if this metric supports training for regression
* problems, and <tt>false</tt> if it does not
*/
abstract public boolean supportsRegressionTraining();
/**
* Returns <tt>true</tt> if the metric needs to be trained. This may be false if
* the metric allows the parameters to be specified beforehand. If the information
     * was specified beforehand, or does not need training, <tt>false</tt> is returned.
*
* @return <tt>true</tt> if the metric needs training, <tt>false</tt> if it does not.
*/
abstract public boolean needsTraining();
@Override
abstract public TrainableDistanceMetric clone();
/**
* Static helper method for training a distance metric only if it is needed.
* This method can be safely called for any Distance Metric.
*
* @param dm the distance metric to train
* @param dataset the data set to train from
*/
public static void trainIfNeeded(DistanceMetric dm, DataSet dataset)
{
trainIfNeeded(dm, dataset, false);
}
/**
* Static helper method for training a distance metric only if it is needed.
* This method can be safely called for any Distance Metric.
*
* @param dm the distance metric to train
* @param dataset the data set to train from
* @param parallel {@code true} if multiple threads should be used for
* training. {@code false} if it should be done in a single-threaded manner.
*/
public static void trainIfNeeded(DistanceMetric dm, DataSet dataset, boolean parallel)
{
if(!(dm instanceof TrainableDistanceMetric))
return;
TrainableDistanceMetric tdm = (TrainableDistanceMetric) dm;
if(!tdm.needsTraining())
return;
if(dataset instanceof RegressionDataSet)
tdm.train((RegressionDataSet) dataset, parallel);
else if(dataset instanceof ClassificationDataSet)
tdm.train((ClassificationDataSet) dataset, parallel);
else
tdm.train(dataset, parallel);
}
/**
* Static helper method for training a distance metric only if it is needed.
* This method can be safely called for any Distance Metric.
*
* @param dm the distance metric to train
* @param dataset the data set to train from
* @param threadpool the source of threads for parallel training. May be
* <tt>null</tt>, in which case {@link #trainIfNeeded(jsat.linear.distancemetrics.DistanceMetric, jsat.DataSet) }
* is used instead.
* @deprecated I WILL DELETE THIS METHOD SOON
*/
public static void trainIfNeeded(DistanceMetric dm, DataSet dataset, ExecutorService threadpool)
{
//TODO I WILL DELETE, JUST STUBBING FOR NOW TO MAKE LIFE EASY AS I DO ONE CODE SECTION AT A TIME
trainIfNeeded(dm, dataset);
}
/**
     * Static helper method for training a distance metric only if it is needed.
     * This method can be safely called for any Distance Metric.
     *
     * @param <V> the type of vectors in the list
     * @param dm the distance metric to train
     * @param dataset the data set to train from
     * @param threadpool the source of threads for parallel training. Currently
     * ignored, as training is done in a single-threaded manner.
* @deprecated I WILL DELETE THIS METHOD SOON
*/
public static <V extends Vec> void trainIfNeeded(DistanceMetric dm, List<V> dataset, ExecutorService threadpool)
{
//TODO I WILL DELETE, JUST STUBBING FOR NOW TO MAKE LIFE EASY AS I DO ONE CODE SECTION AT A TIME
trainIfNeeded(dm, dataset, false);
}
/**
* Static helper method for training a distance metric only if it is needed.
* This method can be safely called for any Distance Metric.
*
* @param <V> the type of vectors in the list
* @param dm the distance metric to train
* @param dataset the data set to train from
*/
public static <V extends Vec> void trainIfNeeded(DistanceMetric dm, List<V> dataset)
{
trainIfNeeded(dm, dataset, false);
}
/**
*
* @param <V> the type of vectors in the list
* @param dm the distance metric to train
* @param dataset the data set to train from
* @param parallel {@code true} if multiple threads should be used for
* training. {@code false} if it should be done in a single-threaded manner.
*/
public static <V extends Vec> void trainIfNeeded(DistanceMetric dm, List<V> dataset, boolean parallel)
{
if(!(dm instanceof TrainableDistanceMetric))
return;
TrainableDistanceMetric tdm = (TrainableDistanceMetric) dm;
if(!tdm.needsTraining())
return;
if(dataset instanceof RegressionDataSet)
tdm.train((RegressionDataSet) dataset, parallel);
else if(dataset instanceof ClassificationDataSet)
tdm.train((ClassificationDataSet) dataset, parallel);
else
tdm.train(dataset, parallel);
}
}
| 9,780 | 39.251029 | 118 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/distancemetrics/WeightedEuclideanDistance.java |
package jsat.linear.distancemetrics;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.linear.Vec;
import jsat.linear.VecOps;
import jsat.utils.DoubleList;
import jsat.utils.FakeExecutor;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.ParallelUtils;
/**
 * Implements the weighted Euclidean distance such that d(x, y) =
 * √(<big>∑</big><sub>∀ i ∈ |w|</sub> w<sub>i</sub>
 * (x<sub>i</sub>-y<sub>i</sub>)<sup>2</sup>) <br>
* When used with a weight vector of ones, it degenerates into
* the {@link EuclideanDistance}.
*
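 * <p>A minimal usage sketch, weighting the first feature more heavily than the
 * second (the values are chosen only for illustration):</p>
 * <pre>{@code
 * Vec w = new DenseVector(new double[]{4.0, 1.0});
 * WeightedEuclideanDistance dm = new WeightedEuclideanDistance(w);
 * Vec x = new DenseVector(new double[]{0.0, 0.0});
 * Vec y = new DenseVector(new double[]{1.0, 1.0});
 * double d = dm.dist(x, y); //sqrt(4*1 + 1*1) = sqrt(5)
 * }</pre>
 * 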
* @author Edward Raff
*/
public class WeightedEuclideanDistance implements DistanceMetric
{
private static final long serialVersionUID = 2959997330647828673L;
private Vec w;
/**
* Creates a new weighted Euclidean distance metric using the
* given set of weights.
* @param w the weight to apply to each variable
*/
public WeightedEuclideanDistance(Vec w)
{
setWeight(w);
}
/**
     * Returns the weight vector used by this object. Alterations to the
     * returned vector are visible to this object, so there is no need to set
     * it again using {@link #setWeight(jsat.linear.Vec) }. If you do not want
     * to alter this object's weights, clone the returned vector and modify the clone.
* @return the weight vector used by this object
*/
public Vec getWeight()
{
return w;
}
/**
* Sets the weight vector to use for the distance function
* @param w the weight vector to use
* @throws NullPointerException if {@code w} is null
*/
public void setWeight(Vec w)
{
if(w == null)
throw new NullPointerException();
this.w = w;
}
@Override
public double dist(Vec a, Vec b)
{
return Math.sqrt(VecOps.accumulateSum(w, a, b, (x)->x*x));
}
@Override
public boolean isSymmetric()
{
return true;
}
@Override
public boolean isSubadditive()
{
return true;
}
@Override
public boolean isIndiscemible()
{
return true;
}
@Override
public double metricBound()
{
return Double.POSITIVE_INFINITY;
}
@Override
public WeightedEuclideanDistance clone()
{
return new WeightedEuclideanDistance(w.clone());
}
    /*
     * Using: w_i (x_i - y_i)^2 = w_i x_i^2 - 2 w_i x_i y_i + w_i y_i^2
     * The self terms w_i x_i^2 and w_i y_i^2 are cached as weighted self dot
     * products, so only the cross term needs VecOps.weightedDot at query time.
     *
     * Also using: w_i x_i^2 = w_i (x_i x_i)
     */
@Override
public boolean supportsAcceleration()
{
return true;
}
@Override
public List<Double> getAccelerationCache(List<? extends Vec> vecs, boolean parallel)
{
        //Store the weighted self dot products in the cache
double[] cache = new double[vecs.size()];
ParallelUtils.run(parallel, vecs.size(), (start, end) ->
{
for(int i = start; i < end; i++)
{
Vec v = vecs.get(i);
cache[i] = VecOps.weightedDot(w, v, v);
}
});
return DoubleList.view(cache, vecs.size());
}
@Override
public double dist(int a, int b, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), vecs.get(b));
return Math.sqrt(cache.get(a)+cache.get(b)-2*VecOps.weightedDot(w, vecs.get(a), vecs.get(b)));
}
@Override
public double dist(int a, Vec b, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), b);
return Math.sqrt(cache.get(a)+VecOps.weightedDot(w, b, b)-2*VecOps.weightedDot(w, vecs.get(a), b));
}
@Override
public List<Double> getQueryInfo(Vec q)
{
DoubleList qi = new DoubleList(1);
qi.add(VecOps.weightedDot(w, q, q));
return qi;
}
@Override
public double dist(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache)
{
if(cache == null)
return dist(vecs.get(a), b);
return Math.sqrt(cache.get(a)+qi.get(0)-2*VecOps.weightedDot(w, vecs.get(a), b));
}
}
| 4,372 | 25.185629 | 107 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/solvers/ConjugateGradient.java |
package jsat.linear.solvers;
import jsat.linear.DenseVector;
import jsat.linear.Matrix;
import jsat.linear.Vec;
/**
 * Provides an iterative implementation of the Conjugate Gradient Method.
 * <br><br>
 * The Conjugate Gradient method, if using exact arithmetic, produces the exact result after a finite
 * number of iterations that is no more than the number of rows in the matrix. Because of this,
 * no max iteration parameter is given.
* <br><br>
*
*
* @author Edward Raff
*/
public class ConjugateGradient
{
/**
* Uses the Conjugate Gradient method to solve a linear system of
* equations involving a symmetric positive definite matrix.<br><br>
* A symmetric positive definite matrix is a matrix A such that: <br>
* <ul>
* <li>A<sup>T</sup> = A</li>
* <li>x<sup>T</sup> * A * x > 0 for all x != 0</li>
* </ul>
* <br><br>
* NOTE: No checks will be performed to confirm these properties of the given matrix.
     * If a matrix is given that does not meet these requirements, invalid results may be returned.
*
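     * <p>A minimal usage sketch on a small symmetric positive definite system
     * (the values here are illustrative only):</p>
     * <pre>{@code
     * Matrix A = new DenseMatrix(new double[][]{{4, 1}, {1, 3}});
     * Vec b = new DenseVector(new double[]{1, 2});
     * Vec x = new DenseVector(2); //initial guess of all zeros, altered in place
     * ConjugateGradient.solve(1e-10, A, x, b); //x now approximates the solution
     * }</pre>
     * 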
* @param eps the precision of the desired result.
* @param A the symmetric positive definite matrix
* @param x an initial guess for x, can be all zeros. This vector will be altered
* @param b the target values
* @return the approximate solution to the equation <i>A x = b</i>
*/
public static Vec solve(double eps, Matrix A, Vec x, Vec b)
{
if(!A.isSquare())
throw new ArithmeticException("A must be a square (symmetric & positive definite) matrix");
else if(A.rows() != b.length() || A.rows() != x.length())
throw new ArithmeticException("Matrix A dimensions do not agree with x and b");
int k = 0;
Vec r_k = b.subtract(A.multiply(x));
Vec p_k = r_k.clone();
Vec Apk;
double RdR = r_k.dot(r_k);
do
{
Apk = A.multiply(p_k);
            double alpha_k = RdR / p_k.dot(Apk);
x.mutableAdd(alpha_k, p_k);
r_k.mutableAdd(-alpha_k, Apk);
double newRdR = r_k.dot(r_k);
//Stop when we are close enough
if(newRdR < eps*eps)
return x;
double beta_k = newRdR/RdR;
p_k.mutableMultiply(beta_k);
p_k.mutableAdd(r_k);
            //Set up for next step
RdR = newRdR;
}
while(k++ < A.rows());
return x;
}
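    /**
     * Uses the Conjugate Gradient method to solve A x = b, with a default
     * precision of 1e-10 and an initial guess of all zeros.
     *
     * @param A the symmetric positive definite matrix
     * @param b the target values
     * @return the approximate solution to the equation <i>A x = b</i>
     */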
public static Vec solve(Matrix A, Vec b)
{
DenseVector x = new DenseVector(b.length());
return solve(1e-10, A, x, b);
}
/**
* Uses the Conjugate Gradient method to solve a linear system of
* equations involving a symmetric positive definite matrix.<br><br>
* A symmetric positive definite matrix is a matrix A such that: <br>
* <ul>
* <li>A<sup>T</sup> = A</li>
* <li>x<sup>T</sup> * A * x > 0 for all x != 0</li>
* </ul>
* <br><br>
* NOTE: No checks will be performed to confirm these properties of the given matrix.
     * If a matrix is given that does not meet these requirements, invalid results may be returned.
*
* @param eps the precision of the desired result.
* @param A the symmetric positive definite matrix
* @param x an initial guess for x, can be all zeros. This vector will be altered
* @param b the target values
     * @param Minv the inverse of a matrix M, such that M is a symmetric positive definite matrix.
     * It is applied as M<sup>-1</sup>( A x - b = 0) to increase convergence and stability.
     * These increases are solely a property of M<sup>-1</sup>
*
* @return the approximate solution to the equation <i>A x = b</i>
*/
public static Vec solve(double eps, Matrix A, Vec x, Vec b, Matrix Minv)
{
if(!A.isSquare() || !Minv.isSquare())
throw new ArithmeticException("A and Minv must be square (symmetric & positive definite) matrix");
else if(A.rows() != b.length() || A.rows() != x.length())
throw new ArithmeticException("Matrix A dimensions do not agree with x and b");
else if(A.rows() != Minv.rows() || A.cols() != Minv.cols())
throw new ArithmeticException("Matrix A and Minv do not have the same dimmentions");
int k = 0;
Vec r_k = b.subtract(A.multiply(x));
Vec z_k = Minv.multiply(r_k);
Vec p_k = z_k.clone();
Vec Apk;
double rkzk = r_k.dot(z_k);
do
{
Apk = A.multiply(p_k);
double alpha = rkzk/p_k.dot(Apk);
x.mutableAdd(alpha, p_k);
r_k.mutableSubtract(alpha, Apk);
if(r_k.dot(r_k) < eps*eps)
return x;
z_k = Minv.multiply(r_k);//TODO implement method so we can reuse z_k space, instead of creating a new vector
double newRkZk = r_k.dot(z_k);
double beta = newRkZk/rkzk;
rkzk = newRkZk;
p_k.mutableMultiply(beta);
p_k.mutableAdd(z_k);
}
while(k++ < A.rows());
return x;
}
/**
* Uses the Conjugate Gradient method to compute the least squares solution to a system
* of linear equations.<br>
     * Computes the least squares solution to A x = b, where A is an m x n matrix, b is
     * a vector of length m, and x is a vector of length n.
*
* <br><br>
* NOTE: Unlike {@link #solve(double, jsat.linear.Matrix, jsat.linear.Vec, jsat.linear.Vec) },
* the CGNR method does not need any special properties of the matrix. Because of this, slower
* convergence or numerical error can occur.
*
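     * <p>A minimal usage sketch on an overdetermined 3x2 system (the values
     * here are illustrative only):</p>
     * <pre>{@code
     * Matrix A = new DenseMatrix(new double[][]{{1, 1}, {1, 2}, {1, 3}});
     * Vec b = new DenseVector(new double[]{1, 2, 2});
     * Vec x = new DenseVector(2); //initial guess of all zeros, altered in place
     * ConjugateGradient.solveCGNR(1e-10, A, x, b); //x now approximates the least squares solution
     * }</pre>
     * 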
* @param eps the desired precision for the result
* @param A any m x n matrix
* @param x the initial guess for x, can be all zeros. This vector will be altered
* @param b the target values
* @return the least squares solution to A x = b
*/
public static Vec solveCGNR(double eps, Matrix A, Vec x, Vec b)
{
if(A.rows() != b.length())
throw new ArithmeticException("Dimensions do not agree for Matrix A and Vector b");
else if(A.cols() != x.length())
throw new ArithmeticException("Dimensions do not agree for Matrix A and Vector x");
        //TODO write a version that does not explicitly form the transpose matrix
Matrix At = A.transpose();
Matrix AtA = At.multiply(A);
Vec AtB = At.multiply(b);
return solve(eps, AtA, x, AtB);
}
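    /**
     * Uses CGNR to compute the least squares solution to A x = b, with a
     * default precision of 1e-10 and an initial guess of all zeros.
     *
     * @param A any m x n matrix
     * @param b the target values
     * @return the least squares solution to A x = b
     */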
public static Vec solveCGNR(Matrix A, Vec b)
{
DenseVector x = new DenseVector(A.cols());
return solveCGNR(1e-10, A, x, b);
}
}
| 6,931 | 35.87234 | 120 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/BallTree.java | /*
* Copyright (C) 2018 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.IntStream;
import jsat.clustering.MEDDIT;
import jsat.clustering.PAM;
import jsat.clustering.TRIKMEDS;
import jsat.linear.DenseVector;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.math.FastMath;
import jsat.utils.BoundedSortedList;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
import jsat.utils.IntList;
import jsat.utils.IntSet;
import jsat.utils.ListUtils;
import jsat.utils.Pair;
import jsat.utils.concurrent.AtomicDoubleArray;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* This class implements the Ball Tree algorithm for accelerating nearest
* neighbor queries. Contained within this class are multiple methods of
 * building Ball Trees. Options can alter how the
 * {@link ConstructionMethod construction} of the hierarchy is done, or how
 * the {@link PivotSelection pivot} is selected. <br>
 * The default method of construction and pivot selection for ball trees will
 * work for most cases, but is not applicable for all distance metrics. If you
 * are using an exotic distance metric, the
* {@link BallTree.ConstructionMethod#TOP_DOWN_FARTHEST} and
* {@link PivotSelection#MEDOID} will work for any dataset, but may be
* slower.<br>
* <br>
* See:
* <ul>
* <li>Omohundro, S. M. (1989). Five Balltree Construction Algorithms (No.
* TR-89-063).</li>
* <li>Moore, A. W. (2000). The Anchors Hierarchy: Using the Triangle Inequality
* to Survive High Dimensional Data. In Proceedings of the Sixteenth Conference
* on Uncertainty in Artificial Intelligence (pp. 397–405). San Francisco, CA,
* USA: Morgan Kaufmann Publishers Inc. Retrieved from
* <a href="http://dl.acm.org/citation.cfm?id=2073946.2073993">here</a></li>
* </ul>
*
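 * <p>A minimal usage sketch (the {@code vecs} and {@code query} variables are
 * placeholders for your own data):</p>
 * <pre>{@code
 * BallTree<Vec> bt = new BallTree<>(new EuclideanDistance(),
 *         BallTree.ConstructionMethod.TOP_DOWN_FARTHEST,
 *         BallTree.PivotSelection.MEDOID);
 * bt.build(false, vecs, new EuclideanDistance());
 * List<Integer> neighbors = new ArrayList<>();
 * List<Double> distances = new ArrayList<>();
 * bt.search(query, 5, neighbors, distances); //5 nearest neighbors of query
 * }</pre>
 * 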
* @author Edward Raff
 * @param <V> the type of vectors stored in this collection
*/
public class BallTree<V extends Vec> implements IncrementalCollection<V>, DualTree<V>
{
public static final int DEFAULT_LEAF_SIZE = 40;
private int leaf_size = DEFAULT_LEAF_SIZE;
private DistanceMetric dm;
private List<V> allVecs;
private List<Double> cache;
private ConstructionMethod construction_method;
private PivotSelection pivot_method;
private Node root;
@Override
public IndexNode getRoot()
{
return root;
}
@Override
public List<Double> getAccelerationCache()
{
return cache;
}
public enum ConstructionMethod
{
/**
* This represents a top-down construction approach, that can be used
* for any distance metric. At each branch the children are given
* initial prototypes. The left child has a prototype selected as the
* point farthest from the pivot, and the right child the point farthest
         * from the left's. The points are split based on which prototype they
         * are closest to. The process continues recursively. <br>
*
* See: Moore, A. W. (2000). The Anchors Hierarchy: Using the Triangle
* Inequality to Survive High Dimensional Data. In Proceedings of the
* Sixteenth Conference on Uncertainty in Artificial Intelligence (pp.
* 397–405). San Francisco, CA, USA: Morgan Kaufmann Publishers Inc.
* Retrieved from http://dl.acm.org/citation.cfm?id=2073946.2073993 for
* details.
*/
TOP_DOWN_FARTHEST,
/**
* This represents a top-down construction approach similar to a
* KD-tree's construction. It requires a metric where it has access to
* meaningful feature values. At each node, the dimension with the
* largest spread in values is selected. Then the split is made based on
* sorting the found feature into two even halves.<br>
* See: Omohundro, S. M. (1989). Five Balltree Construction Algorithms
* (No. TR-89-063).
*/
KD_STYLE,
/**
* This represents a "middle-out" construction approach. Computational
* is works best when the {@link PivotSelection#CENTROID centroid} pivot
* selection method can be used.<br>
* See: Moore, A. W. (2000). The Anchors Hierarchy: Using the Triangle
* Inequality to Survive High Dimensional Data. In Proceedings of the
* Sixteenth Conference on Uncertainty in Artificial Intelligence (pp.
* 397–405). San Francisco, CA, USA: Morgan Kaufmann Publishers Inc.
* Retrieved from http://dl.acm.org/citation.cfm?id=2073946.2073993 for
* details.
*/
ANCHORS_HIERARCHY;
}
public enum PivotSelection
{
/**
* This method selects the pivot by taking the centroid (average) of all
* the data within a node. This method may not be applicable for all
* metrics, and can't be used for once for which there is no computable
* average.
*/
CENTROID
{
@Override
public Vec getPivot(boolean parallel, List<Integer> points, List<? extends Vec> data, DistanceMetric dm, List<Double> cache)
{
if (points.size() == 1)
return data.get(points.get(0)).clone();
Vec pivot = new DenseVector(data.get(points.get(0)).length());
for (int i : points)
pivot.mutableAdd(data.get(i));
pivot.mutableDivide(points.size());
return pivot;
}
},
/**
* This method selects the pivot by searching for the medoid of the
* data. This can be used in all circumstances, but may be slower.
*/
MEDOID
{
@Override
public Vec getPivot(boolean parallel, List<Integer> points, List<? extends Vec> data, DistanceMetric dm, List<Double> cache)
{
if (points.size() == 1)
return data.get(points.get(0)).clone();
int indx;
if(dm.isValidMetric())
indx = TRIKMEDS.medoid(parallel, points, data, dm, cache);
else
indx = PAM.medoid(parallel, points, data, dm, cache);
return data.get(indx);
}
},
/**
* This method selects the pivot by searching for an approximate medoid
* of the data. This can be used in all circumstances, but may be
* slower.
*/
MEDOID_APRX
{
@Override
public Vec getPivot(boolean parallel, List<Integer> points, List<? extends Vec> data, DistanceMetric dm, List<Double> cache)
{
if (points.size() == 1)
return data.get(points.get(0)).clone();
int indx;
//Faster to use exact search
if(points.size() < 1000)
{
if(dm.isValidMetric())
indx = TRIKMEDS.medoid(parallel, points, data, dm, cache);
else
indx = PAM.medoid(parallel, points, data, dm, cache);
}
else//Lets do approx search
indx = MEDDIT.medoid(parallel, points, 0.2, data, dm, cache);
return data.get(indx);
}
},
/**
* A random point will be selected as the pivot for the ball
*/
RANDOM
{
@Override
public Vec getPivot(boolean parallel, List<Integer> points, List<? extends Vec> data, DistanceMetric dm, List<Double> cache)
{
                int indx = points.get(RandomUtil.getLocalRandom().nextInt(points.size()));
                return data.get(indx);
}
        };
public abstract Vec getPivot(boolean parallel, List<Integer> points, List<? extends Vec> data, DistanceMetric dm, List<Double> cache);
}
public BallTree()
{
this(new EuclideanDistance(), ConstructionMethod.KD_STYLE, PivotSelection.CENTROID);
}
public BallTree(DistanceMetric dm, ConstructionMethod method, PivotSelection pivot_method)
{
setConstruction_method(method);
setPivot_method(pivot_method);
setDistanceMetric(dm);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public BallTree(BallTree toCopy)
{
this(toCopy.dm, toCopy.construction_method, toCopy.pivot_method);
if(toCopy.allVecs != null)
this.allVecs = new ArrayList<>(toCopy.allVecs);
if(toCopy.cache != null)
this.cache = new DoubleList(toCopy.cache);
if(toCopy.root != null)
this.root = cloneChangeContext(toCopy.root);
this.leaf_size = toCopy.leaf_size;
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
@Override
public DistanceMetric getDistanceMetric()
{
return dm;
}
/**
* Sets the number of points stored within a leaf node of the index. Larger
* values avoid search overhead, but reduce opportunities for pruning.
*
* @param leaf_size the size of a leaf node. Must be at least 2
*/
public void setLeafSize(int leaf_size)
{
if (leaf_size < 2)
throw new IllegalArgumentException("The leaf size must be >= 2 to support all splitting methods");
this.leaf_size = leaf_size;
}
/**
*
* @return the number of points to store within a leaf node
*/
public int getLeafSize()
{
return leaf_size;
}
/**
* Computes the maximum depth of the current tree. A value of zero indicates
     * that only a root node exists or the tree is empty. Any other value is the
     * maximum number of branches between the root and the deepest leaf.
*
* @return the maximum current depth of this Ball Tree
*/
public int getMaxDepth()
{
if(root == null)
return 0;
else
return root.findMaxDepth(0);
}
public void setPivot_method(PivotSelection pivot_method)
{
this.pivot_method = pivot_method;
}
public PivotSelection getPivot_method()
{
return pivot_method;
}
public void setConstruction_method(ConstructionMethod construction_method)
{
this.construction_method = construction_method;
}
public ConstructionMethod getConstruction_method()
{
return construction_method;
}
private Node build_far_top_down(List<Integer> points, boolean parallel)
{
Branch branch = new Branch();
branch.setPivot(points);
branch.setRadius(points);
//Use point farthest from parent pivot for left child
int f1 = ParallelUtils.streamP(points.stream(), parallel)
.map(i->new IndexDistPair(i, dm.dist(i, branch.pivot, branch.pivot_qi, allVecs, cache)))
.max(IndexDistPair::compareTo).orElse(new IndexDistPair(0, 0.0)).indx;
        //use point farthest from f1 for right child
int f2 = ParallelUtils.streamP(points.stream(), parallel)
.map(i->new IndexDistPair(i, dm.dist(i, f1, allVecs, cache)))
.max(IndexDistPair::compareTo).orElse(new IndexDistPair(1, 0.0)).indx;
        //Now split children based on who is closest to f1 and f2
IntList left_children = new IntList();
IntList right_children = new IntList();
for(int p : points)
{
double d_f1 = dm.dist(p, f1, allVecs, cache);
double d_f2 = dm.dist(p, f2, allVecs, cache);
if(d_f1 < d_f2)
left_children.add(p);
else
right_children.add(p);
}
if(left_children.isEmpty() || right_children.isEmpty())
{
//This can happen if all the points have the exact same value, so all distances are zero.
//So we can't branch, return a leaf node instead
left_children.addAll(right_children);
Leaf leaf = new Leaf(left_children);
leaf.pivot = branch.pivot;
leaf.pivot_qi = branch.pivot_qi;
leaf.radius = 0.0;//Weird, but correct! All dists = 0, so radius = 0
return leaf;
}
        //everyone has been assigned, now create children objects
branch.left_child = build(left_children, parallel);
branch.right_child = build(right_children, parallel);
branch.left_child.parent = branch;
branch.right_child.parent = branch;
return branch;
}
private Node build_kd(List<Integer> points, boolean parallel)
{
//Lets find the dimension with the maximum spread
int D = allVecs.get(0).length();
final boolean isSparse = allVecs.get(0).isSparse();
//If sparse, keep a set of indexes we HAVE NOT SEEN
//these have implicity zeros we need to add back at the end
final Set<Integer> neverSeen;
if (isSparse)
if (parallel)
{
neverSeen = ConcurrentHashMap.newKeySet();
ListUtils.addRange(neverSeen, 0, D, 1);
}
else
neverSeen = new IntSet(ListUtils.range(0, D));
else
neverSeen = Collections.EMPTY_SET;
AtomicDoubleArray mins = new AtomicDoubleArray(D);
mins.fill(Double.POSITIVE_INFINITY);
AtomicDoubleArray maxs = new AtomicDoubleArray(D);
maxs.fill(Double.NEGATIVE_INFINITY);
ParallelUtils.streamP(points.stream(), parallel).forEach(i->
{
for(IndexValue iv : get(i))
{
int d = iv.getIndex();
mins.updateAndGet(d, (m_d)->Math.min(m_d, iv.getValue()));
maxs.updateAndGet(d, (m_d)->Math.max(m_d, iv.getValue()));
neverSeen.remove(d);
}
});
IndexDistPair maxSpread = ParallelUtils.range(D, parallel)
.mapToObj(d->
{
double max_d = maxs.get(d), min_d = mins.get(d);
if(neverSeen != null && neverSeen.contains(d))
{
max_d = Math.max(max_d, 0);
min_d = Math.min(min_d, 0);
}
return new IndexDistPair(d, max_d - min_d);
})
.max(IndexDistPair::compareTo).get();
if(maxSpread.dist == 0)//all the data is the same? Return a leaf
{
Leaf leaf = new Leaf(new IntList(points));
leaf.setPivot(points);
leaf.setRadius(points);
return leaf;
}
//We found it! Lets sort points by this new value
final int d = maxSpread.indx;
points.sort((Integer o1, Integer o2) -> Double.compare(get(o1).get(d), get(o2).get(d)));
int midPoint = points.size()/2;
        //Let's check that we don't have identical values at the split, and adjust as needed
        while(midPoint > 1 && get(points.get(midPoint-1)).get(d) == get(points.get(midPoint)).get(d))
midPoint--;
List<Integer> left_children = points.subList(0, midPoint);
List<Integer> right_children = points.subList(midPoint, points.size());
Branch branch = new Branch();
branch.setPivot(points);
branch.setRadius(points);
        //everyone has been assigned, now create children objects
branch.left_child = build(left_children, parallel);
branch.right_child = build(right_children, parallel);
branch.left_child.parent = branch;
branch.right_child.parent = branch;
return branch;
}
private Node build_anchors(List<Integer> points, boolean parallel)
{
        //Ceiling to avoid issues with points rounding down to k=1, causing an infinite recursion
int K = (int) Math.ceil(Math.sqrt(points.size()));
int[] anchor_point_index = new int[K];
int[] anchor_index = new int[K];
IntList[] owned = new IntList[K];
        //anchor paper says sort from high dist to low, we do the reverse for convenience and removal efficiency
DoubleList[] ownedDist = new DoubleList[K];
for(int k = 1; k < K; k++)
{
owned[k] = new IntList();
ownedDist[k] = new DoubleList();
}
Random rand = RandomUtil.getRandom();
//First case is special, select anchor at random and create list
anchor_point_index[0] =rand.nextInt(points.size());
anchor_index[0] = points.get(anchor_point_index[0]);
owned[0] = IntList.range(points.size());
ownedDist[0] = DoubleList.view(ParallelUtils.streamP(owned[0].streamInts(), parallel)
.mapToDouble(i->dm.dist(anchor_index[0], points.get(i), allVecs, cache))
.toArray(),
points.size());
IndexTable it = new IndexTable(ownedDist[0]);
it.apply(owned[0]);
it.apply(ownedDist[0]);
//Now lets create the other anchors
for(int k = 1; k < K; k++)
{
/*
* How is the new anchor a^new chosen? We simply find the current
* anchor a^maxrad with the largest radius, and choose the pivot of
* a^new to be the point owned by a^maxrad that is furthest from
* a^maxrad
*/
int max_radius_anch = IntStream.range(0, k).mapToObj(z-> new IndexDistPair(z, ownedDist[z].get(ownedDist[z].size()-1)))
.max(IndexDistPair::compareTo)
.get().indx;
anchor_point_index[k] = owned[max_radius_anch].getI(owned[max_radius_anch].size()-1);
anchor_index[k] = points.get(anchor_point_index[k]);
owned[max_radius_anch].remove(owned[max_radius_anch].size()-1);
ownedDist[max_radius_anch].remove(ownedDist[max_radius_anch].size()-1);
owned[k].add(anchor_point_index[k]);
ownedDist[k].add(0.0);
//lets go through other anchors and see what we can steal
for(int j = 0; j < k; j++)
{
double dist_ak_aj = dm.dist(anchor_index[j], anchor_index[k], allVecs, cache);
ListIterator<Integer> ownedIter = owned[j].listIterator(owned[j].size());
ListIterator<Double> ownedDistIter = ownedDist[j].listIterator(ownedDist[j].size());
while(ownedIter.hasPrevious())
{
int point_indx = ownedIter.previous();
double dist_aj_x = ownedDistIter.previous();
double dist_ak_x = dm.dist(anchor_index[k], points.get(point_indx), allVecs, cache);
if(dist_ak_x < dist_aj_x)//we can steal this point!
{
owned[k].add(point_indx);
ownedDist[k].add(dist_ak_x);
ownedIter.remove();
ownedDistIter.remove();
}
else if(dist_ak_x < dist_ak_aj/2)
{
//"we can deduce that the remainder of the points in ai's list cannot possibly be stolen"
break;
}
}
}
//now sort our new children
it = new IndexTable(ownedDist[k]);
it.apply(owned[k]);
it.apply(ownedDist[k]);
}
//Now we have sqrt(R) anchors. Lets do the middle-down first, creating Nodes for each anchor
List<Node> anchor_nodes = new ArrayList<>();
for (int k = 0; k < K; k++)
{
Node n_k = build(IntList.view(owned[k].streamInts().map(i->points.get(i)).toArray()), parallel);
n_k.pivot = get(anchor_index[k]);
n_k.radius = ownedDist[k].getD(ownedDist[k].size()-1);
anchor_nodes.add(n_k);
}
//TODO below code is ugly... needs improvement
//Ok, now lets go middle-up to finish the tree
//We will store the costs of merging any pair of anchor_nodes in this map
Map<Pair<Integer, Integer>, Double> mergeCost = new HashMap<>();
Map<Pair<Integer, Integer>, Vec> pivotCache = new HashMap<>();
//use a priority queue to pop of workers, and use values from mergeCost to sort
List<PriorityQueue<Pair<Integer, Integer>>> mergeQs = new ArrayList<>();
PriorityQueue<Integer> QQ = new PriorityQueue<>((q1, q2)->
{
double v1 = mergeCost.get(mergeQs.get(q1).peek());
double v2 = mergeCost.get(mergeQs.get(q2).peek());
return Double.compare(v1, v2);
});
///Initial population of Qs and costs
for(int k = 0; k < K; k++)
{
PriorityQueue<Pair<Integer, Integer>> mergeQ_k = new PriorityQueue<>((Pair<Integer, Integer> o1, Pair<Integer, Integer> o2) ->
Double.compare(mergeCost.get(o1), mergeCost.get(o2)));
mergeQs.add(mergeQ_k);
Node n_k = anchor_nodes.get(k);
IntList owned_nk = new IntList();
for(int i : n_k)
owned_nk.add(i);
int size_k = owned_nk.size();
for(int z = k+1; z < K; z++)
{
Node n_z = anchor_nodes.get(z);
Pair<Integer, Integer> p = new Pair<>(k, z);
IntList owned_nkz = new IntList(owned_nk);
int size_z, size_nk;
for(int i : n_z)
owned_nkz.add(i);
size_nk = owned_nkz.size();
size_z = size_nk-size_k;
Vec pivot_candidate;
if(pivot_method == PivotSelection.CENTROID)
{
//we can directly compute the would-be pivot
pivot_candidate = n_k.pivot.clone();
pivot_candidate.mutableMultiply(size_k/(double)size_nk);
pivot_candidate.mutableAdd(size_z/(double)size_nk, n_z.pivot);
}
else//we need to compute the pivot
pivot_candidate = pivot_method.getPivot(parallel, owned_nkz, allVecs, dm, cache);
            List<Double> pivot_candidate_qi = dm.getQueryInfo(pivot_candidate);
            //what would the radius be?
            double radius_kz = 0;
            for(int i : owned_nkz)
                radius_kz = Math.max(radius_kz, dm.dist(i, pivot_candidate, pivot_candidate_qi, allVecs, cache));
mergeCost.put(p, radius_kz);
pivotCache.put(p, pivot_candidate);
mergeQ_k.add(p);
}
if(!mergeQ_k.isEmpty())
QQ.add(k);
}
//Now lets start merging!
Branch toReturn = null;
while(!QQ.isEmpty())
{
int winningQ = QQ.poll();
Pair<Integer, Integer> toMerge = mergeQs.get(winningQ).poll();
int other = toMerge.getSecondItem();
if(anchor_nodes.get(winningQ) == null)//leftover, its gone
continue;
else if(anchor_nodes.get(other) == null)
{
if(!mergeQs.get(winningQ).isEmpty())//stale, lets fix
QQ.add(winningQ);
continue;
}
Branch merged = toReturn = new Branch();
merged.pivot = pivotCache.get(toMerge);
merged.pivot_qi = dm.getQueryInfo(merged.pivot);
merged.radius = mergeCost.get(toMerge);
merged.left_child = anchor_nodes.get(winningQ);
merged.right_child = anchor_nodes.get(other);
merged.left_child.parent = merged;
merged.right_child.parent = merged;
anchor_nodes.set(winningQ, merged);
anchor_nodes.set(other, null);
//OK, we have merged two points. Now book keeping. Remove all Qs
PriorityQueue<Pair<Integer, Integer>> mergeQ_k = new PriorityQueue<>((Pair<Integer, Integer> o1, Pair<Integer, Integer> o2) ->
Double.compare(mergeCost.get(o1), mergeCost.get(o2)));
mergeQs.set(winningQ, mergeQ_k);
Node n_k = merged;
IntList owned_nk = new IntList();
for(int i : n_k)
owned_nk.add(i);
int size_k = owned_nk.size();
for(int z = 0; z < anchor_nodes.size(); z++)
{
if(z == winningQ)
continue;
if(anchor_nodes.get(z) == null)
continue;
Node n_z = anchor_nodes.get(z);
Pair<Integer, Integer> p;
if(winningQ < z)
p = new Pair<>(winningQ, z);
else
p = new Pair<>(z, winningQ);
IntList owned_nkz = new IntList(owned_nk);
int size_z, size_nk;
for(int i : n_z)
owned_nkz.add(i);
size_nk = owned_nkz.size();
size_z = size_nk-size_k;
Vec pivot_candidate;
if(pivot_method == PivotSelection.CENTROID)
{
//we can directly compute the would-be pivot
pivot_candidate = n_k.pivot.clone();
pivot_candidate.mutableMultiply(size_k/(double)size_nk);
pivot_candidate.mutableAdd(size_z/(double)size_nk, n_z.pivot);
}
else//we need to compute the pivot
pivot_candidate = pivot_method.getPivot(parallel, owned_nkz, allVecs, dm, cache);
            List<Double> pivot_candidate_qi = dm.getQueryInfo(pivot_candidate);
            //what would the radius be?
            double radius_kz = 0;
            for(int i : owned_nkz)
                radius_kz = Math.max(radius_kz, dm.dist(i, pivot_candidate, pivot_candidate_qi, allVecs, cache));
pivotCache.put(p, pivot_candidate);
if(winningQ < z)
{
mergeCost.put(p, radius_kz);
mergeQ_k.add(p);
}
else
{
mergeQs.get(z).remove(p);
mergeCost.put(p, radius_kz);
mergeQs.get(z).add(p);
}
}
if(!mergeQ_k.isEmpty())
QQ.add(winningQ);
}
return toReturn;
}
private Node build(List<Integer> points, boolean parallel)
{
//universal base case
if(points.size() <= leaf_size)
{
Leaf leaf = new Leaf(new IntList(points));
leaf.setPivot(points);
leaf.setRadius(points);
return leaf;
}
switch(construction_method)
{
case ANCHORS_HIERARCHY:
return build_anchors(points, parallel);
case KD_STYLE:
return build_kd(points, parallel);
case TOP_DOWN_FARTHEST:
return build_far_top_down(points, parallel);
}
return new Leaf(new IntList(0));
}
@Override
public void build(boolean parallel, List<V> collection, DistanceMetric dm)
{
this.allVecs = new ArrayList<>(collection);
setDistanceMetric(dm);
this.cache = dm.getAccelerationCache(allVecs, parallel);
this.root = build(IntList.range(collection.size()), parallel);
}
@Override
public void insert(V x)
{
if(root == null)
{
allVecs = new ArrayList<>();
allVecs.add(x);
cache = dm.getAccelerationCache(allVecs);
root = new Leaf(IntList.range(1));
root.pivot = x.clone();
root.pivot_qi = dm.getQueryInfo(x);
root.radius = 0;
return;
}
int indx = allVecs.size();
allVecs.add(x);
if(cache != null)
cache.addAll(dm.getQueryInfo(x));
Branch parentNode = null;
Node curNode = root;
double dist_to_curNode = dm.dist(indx, curNode.pivot, curNode.pivot_qi, allVecs, cache);
while(curNode != null)
{
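            //every node on the path down must grow its radius to cover the new point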
curNode.radius = Math.max(curNode.radius, dist_to_curNode);
if(curNode instanceof jsat.linear.vectorcollection.BallTree.Leaf)
{
Leaf lroot = (Leaf) curNode;
lroot.children.add(indx);
if(lroot.children.size() > leaf_size)
{
Node newNode = build(lroot.children, false);
if(parentNode == null)//We are the root node and a leaf
root = newNode;
                else if(parentNode.left_child == curNode)//YES, intentionally checking object equality
parentNode.left_child = newNode;
else
parentNode.right_child = newNode;
}
return;
}
else
{
Branch b = (Branch) curNode;
double left_dist = dm.dist(indx, b.left_child.pivot, b.left_child.pivot_qi, allVecs, cache);
double right_dist = dm.dist(indx, b.right_child.pivot, b.right_child.pivot_qi, allVecs, cache);
                boolean goLeftBranch = left_dist < right_dist;
                //descend the tree
parentNode = b;
if(goLeftBranch)
{
curNode = b.left_child;
dist_to_curNode = left_dist;
}
else
{
curNode = b.right_child;
dist_to_curNode = right_dist;
}
}
}
}
@Override
public BallTree<V> clone()
{
return new BallTree<>(this);
}
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
neighbors.clear();
distances.clear();
root.search(query, dm.getQueryInfo(query), range, neighbors, distances);
IndexTable it = new IndexTable(distances);
it.apply(distances);
it.apply(neighbors);
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
neighbors.clear();
distances.clear();
BoundedSortedList<IndexDistPair> knn = new BoundedSortedList<>(numNeighbors);
root.search(query, dm.getQueryInfo(query), numNeighbors, knn, Double.POSITIVE_INFINITY);
for(IndexDistPair p : knn)
{
neighbors.add(p.indx);
distances.add(p.dist);
}
}
@Override
public V get(int indx)
{
return allVecs.get(indx);
}
@Override
public int size()
{
return allVecs.size();
}
private abstract class Node implements Cloneable, Serializable, Iterable<Integer>, IndexNode<Node>
{
Vec pivot;
List<Double> pivot_qi;
double radius;
Node parent;
double parrent_dist = Double.POSITIVE_INFINITY;
public Node()
{
}
public Node(Node toCopy)
{
if(toCopy.pivot != null)
this.pivot = toCopy.pivot.clone();
if(toCopy.pivot_qi != null)
this.pivot_qi = new DoubleList(toCopy.pivot_qi);
this.radius = toCopy.radius;
}
public void setPivot(List<Integer> points)
{
if(points.size() == 1)
pivot = get(points.get(0)).clone();
else
pivot = pivot_method.getPivot(false, points, allVecs, dm, cache);
pivot_qi = dm.getQueryInfo(pivot);
}
public void setRadius(List<Integer> points)
{
this.radius = 0;
for(int i : points)
radius = Math.max(radius, dm.dist(i, pivot, pivot_qi, allVecs, cache));
}
abstract public int findMaxDepth(int curDepth);
abstract public void search(Vec query, List<Double> qi, double range, List<Integer> neighbors, List<Double> distances);
abstract public void search(Vec query, List<Double> qi, int numNeighbors, BoundedSortedList<IndexDistPair> knn, double pivot_to_query);
@Override
public double minNodeDistance(int other)
{
return 0;
}
@Override
public double minNodeDistance(Node other)
{
return dm.dist(this.pivot, other.pivot) - this.radius - other.radius;
}
@Override
public double furthestDescendantDistance()
{
return radius;
}
@Override
public double maxNodeDistance(Node other)
{
return dm.dist(this.pivot, other.pivot) + this.radius + other.radius;
}
@Override
public double[] minMaxDistance(Node other)
{
double d = dm.dist(this.pivot, other.pivot);
return new double[]
{
d - this.radius - other.radius,
d + this.radius + other.radius
};
}
@Override
public double furthestPointDistance()
{
return 0;//don't own any points, so dist is zero
}
@Override
public Node getParrent()
{
return parent;
}
// @Override
// public double getParentDistance()
// {
// return parrent_dist;
// }
@Override
public Vec getVec(int indx)
{
return get(indx);
}
}
private class Leaf extends Node
{
IntList children;
public Leaf(IntList children)
{
this.children = children;
}
public Leaf(Leaf toCopy)
{
super(toCopy);
this.children = new IntList(toCopy.children);
}
@Override
public void search(Vec query, List<Double> qi, double range, List<Integer> neighbors, List<Double> distances)
{
for(int indx : children)
{
double dist = dm.dist(indx, query, qi, allVecs, cache);
if(dist <= range)
{
neighbors.add(indx);
distances.add(dist);
}
}
}
@Override
public void search(Vec query, List<Double> qi, int numNeighbors, BoundedSortedList<IndexDistPair> knn, double pivot_to_query)
{
for(int indx : children)
knn.add(new IndexDistPair(indx, dm.dist(indx, query, qi, allVecs, cache)));
}
@Override
public Iterator<Integer> iterator()
{
return children.iterator();
}
@Override
public int findMaxDepth(int curDepth)
{
return curDepth;
}
@Override
public int numChildren()
{
return 0;
}
@Override
public IndexNode getChild(int indx)
{
throw new IndexOutOfBoundsException("Leaf nodes do not have children");
}
@Override
public int numPoints()
{
return children.size();
}
@Override
public int getPoint(int indx)
{
return children.get(indx);
}
}
private class Branch extends Node
{
Node left_child;
Node right_child;
public Branch()
{
}
public int findMaxDepth(int curDepth)
{
return Math.max(left_child.findMaxDepth(curDepth+1), right_child.findMaxDepth(curDepth+1));
}
public Branch(Branch toCopy)
{
super(toCopy);
this.left_child = cloneChangeContext(toCopy.left_child);
this.right_child = cloneChangeContext(toCopy.right_child);
}
@Override
public void search(Vec query, List<Double> qi, double range, List<Integer> neighbors, List<Double> distances)
{
if(dm.dist(query, pivot) - radius >= range)
return;//We can prune this branch!
left_child.search(query, qi, range, neighbors, distances);
right_child.search(query, qi, range, neighbors, distances);
}
@Override
public void search(Vec query, List<Double> qi, int numNeighbors, BoundedSortedList<IndexDistPair> knn, double pivot_to_query)
{
if(Double.isInfinite(pivot_to_query))//can happen for first call
pivot_to_query = dm.dist(query, pivot);
if(knn.size() >= numNeighbors && pivot_to_query - radius >= knn.last().dist)
return;//We can prune this branch!
double dist_left = dm.dist(query, left_child.pivot);
double dist_right = dm.dist(query, right_child.pivot);
double close_child_dist = dist_left;
Node close_child = left_child;
double far_child_dist = dist_right;
Node far_child = right_child;
if(dist_right < dist_left)
{
close_child_dist = dist_right;
close_child = right_child;
far_child_dist = dist_left;
far_child = left_child;
}
close_child.search(query, qi, numNeighbors, knn, close_child_dist);
far_child.search(query, qi, numNeighbors, knn, far_child_dist);
}
@Override
public Iterator<Integer> iterator()
{
Iterator<Integer> iter_left = left_child.iterator();
            assert right_child != null : "a Branch node should always have two children";
Iterator<Integer> iter_right = right_child.iterator();
return new Iterator<Integer>()
{
@Override
public boolean hasNext()
{
return iter_left.hasNext() || iter_right.hasNext();
}
@Override
public Integer next()
{
if(iter_left.hasNext())
return iter_left.next();
else
return iter_right.next();
}
};
}
@Override
public int numChildren()
{
return 2;
}
@Override
public IndexNode getChild(int indx)
{
switch(indx)
{
case 0:
return left_child;
case 1:
return right_child;
default:
throw new IndexOutOfBoundsException();
}
}
@Override
public int numPoints()
{
return 0;
}
@Override
public int getPoint(int indx)
{
throw new IndexOutOfBoundsException("Branching node does not contain any children");
}
}
private Node cloneChangeContext(Node toClone)
{
if (toClone != null)
if (toClone instanceof jsat.linear.vectorcollection.BallTree.Leaf)
return new Leaf((Leaf) toClone);
else
return new Branch((Branch) toClone);
return null;
}
}
| 41,434 | 34.475171 | 143 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/BaseCaseDT.java | /*
* Copyright (C) 2018 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
/**
 * This interface provides the base case operation used when implementing
 * dual-tree algorithms.
* @author Edward Raff
*/
public interface BaseCaseDT
{
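    /**
     * Computes the base case of a dual-tree traversal for a single pair of
     * points, which is usually a point-to-point distance computation.
     *
     * @param r_indx the index of the reference point
     * @param q_indx the index of the query point
     * @return the result of the base case computation (e.g., a distance)
     */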
public double base_case(int r_indx, int q_indx);
}
| 848 | 30.444444 | 72 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/CoverTree.java | /*
* Copyright (C) 2017 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.Stack;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.math.FastMath;
import jsat.utils.BoundedSortedList;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
import jsat.utils.IntList;
import jsat.utils.IntSet;
import jsat.utils.ListUtils;
import jsat.utils.Pair;
import jsat.utils.concurrent.AtomicDouble;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.XORWOW;
/**
* This class implements the Cover-tree algorithm for answering nearest neighbor
* queries. In particular, it uses the "Simplified Cover-tree" algorithm. <br>
* Note, this implementation does not yet support parallel construction.
* <br>
*
* See:
* <ul>
* <li>Beygelzimer, A., Kakade, S., & Langford, J. (2006). Cover trees for
* nearest neighbor. In International Conference on Machine Learning (pp.
* 97–104). New York: ACM. Retrieved from
* <a href="http://www.cs.princeton.edu/courses/archive/spr05/cos598E/bib/covertree.pdf">here</a></li>
* <li>Izbicki, M., & Shelton, C. R. (2015). Faster Cover Trees. In Proceedings
* of the Thirty-Second International Conference on Machine Learning (Vol.
* 37).</li>
* </ul>
*
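 * <p>A minimal usage sketch (the {@code vecs} and {@code query} variables are
 * placeholders for your own data):</p>
 * <pre>{@code
 * CoverTree<Vec> ct = new CoverTree<>(vecs, new EuclideanDistance());
 * List<Integer> neighbors = new ArrayList<>();
 * List<Double> distances = new ArrayList<>();
 * ct.search(query, 5, neighbors, distances); //5 nearest neighbors of query
 * }</pre>
 * 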
* @author Edward Raff
*/
public final class CoverTree<V extends Vec> implements IncrementalCollection<V>
{
private DistanceMetric dm;
private List<V> vecs;
private List<Double> accell_cache = null;
private TreeNode root = null;
private boolean maxDistDirty = false;
// private boolean nearest_ancestor = false;
private boolean looseBounds = false;
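    //Lookup table of powers of the expansion constant "base", precomputed to
    //avoid repeated calls to Math.pow during construction and search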
private static final int min_pow_map = -120;
private static final int max_pow_map = 1000;
private static final float[] pow_map = new float[max_pow_map-min_pow_map];
private static final double base = 1.3;
private static final double log2_base = Math.log(base)/Math.log(2);
static
{
for(int pow_indx = min_pow_map; pow_indx < max_pow_map; pow_indx++)
pow_map[pow_indx-min_pow_map] = (float) Math.pow(base, pow_indx);
}
private static double pow(int expo)
{
if(expo >= min_pow_map && expo < max_pow_map)
return pow_map[expo-min_pow_map];
else
return Math.pow(base, expo);
}
public CoverTree(DistanceMetric dm)
{
this.dm = dm;
vecs = new ArrayList<>();
}
public CoverTree(List<V> source, DistanceMetric dm)
{
this(source, dm, false);
}
public CoverTree(List<V> source, DistanceMetric dm, boolean parallel)
{
this(source, dm, parallel, false);
}
public CoverTree(List<V> source, DistanceMetric dm, boolean parallel, boolean looseBounds)
{
setLooseBounds(looseBounds);
build(parallel, source, dm);
}
public CoverTree(CoverTree<V> toCopy)
{
this.dm = toCopy.dm.clone();
this.looseBounds = toCopy.looseBounds;
this.vecs = new ArrayList<>(toCopy.vecs);
if(toCopy.accell_cache != null)
this.accell_cache = new DoubleList(toCopy.accell_cache);
if(toCopy.root != null)
this.root = new TreeNode(toCopy.root);
}
@Override
public List<Double> getAccelerationCache()
{
return accell_cache;
}
@Override
public void build(boolean parallel, List<V> collection, DistanceMetric dm)
{
this.dm = dm;
setLooseBounds(looseBounds);
this.vecs = new ArrayList<>(collection);
this.accell_cache = dm.getAccelerationCache(vecs, parallel);
        //Cover Tree is sensitive to insertion order, so let's make sure it's random
IntList order = new IntList(this.vecs.size());
ListUtils.addRange(order, 0, this.vecs.size(), 1);
// Set<Integer> S = getSet(parallel);
// S.addAll(order);
// int p = S.stream().findAny().get();
// S.remove(p);
//
// this.root = new TreeNode(p);
// construct(parallel, root, S, getSet(parallel), Integer.MAX_VALUE);
Collections.shuffle(order, new XORWOW(54321));
int pos = 0;
for(int i : order)
{
root = simpleInsert(root, i);
pos++;
// System.out.println("\t" + pos + " vs " + this.root.magnitude());
}
// System.out.println(this.vecs.size() + " vs " + this.root.magnitude());
if(!this.looseBounds)//pre-compute all max-dist bounds used during search
{
this.root.maxdist();
Iterator<TreeNode> iter = this.root.descendants();
while(iter.hasNext())
{
iter.next().maxdist();
}
}
}
/**
     * Recursively constructs the subtree rooted at p from the given point
     * sets, following the batch construction scheme referenced in the class
     * documentation.
     *
     * @param parallel whether to use multiple threads for the distance computations
     * @param p the node currently being constructed
     * @param near the indices of points within the cover radius of p, consumed by this call
     * @param far the indices of points outside the cover radius of p
     * @param level the level of p, or Integer.MAX_VALUE if it must still be determined
     * @return the set of points left unused by this subtree
*/
private Set<Integer> construct(boolean parallel, TreeNode p, Set<Integer> near, Set<Integer> far, int level)
{
if(near.isEmpty())
return far;
Set<Integer> workingNearSet;
if(level == Integer.MAX_VALUE)//We need to figure out the correct level and do the split at once to avoid duplicate work
{
int[] points = near.stream().mapToInt(i->i).toArray();
double[] dists = new double[points.length];
double maxDist = ParallelUtils.run(parallel, points.length, (start, end)->
{
double max_ = 0;
for(int i = start; i < end; i++)
{
dists[i] = dm.dist(p.vec_indx, points[i], vecs, accell_cache);
max_ = Math.max(max_, dists[i]);
}
return max_;
}, (a, b) -> Math.max(a, b));
level = p.level = (int) Math.ceil(FastMath.log2(maxDist)/log2_base+1e-4);
p.maxdist = maxDist;
double r_split = pow(p.level-1);
near.clear();
Set<Integer> newNear = getSet(parallel);
Set<Integer> newFar = getSet(parallel);
ParallelUtils.run(parallel, points.length, (start, end)->
{
for(int i = start; i < end; i++)
{
double d_i = dists[i];
if(d_i <= r_split)
newNear.add(points[i]);
else if (d_i < 2 * r_split)
newFar.add(points[i]);
else
near.add(points[i]);
}
});
workingNearSet = construct(parallel, p, newNear, newFar, p.level-1);
}
else
{
Pair<Set<Integer>, Set<Integer>> pairRet = split(parallel, p.vec_indx, pow(level-1), near);
workingNearSet = construct(parallel, p, pairRet.getFirstItem(), pairRet.getSecondItem(), level-1);
}
while(!workingNearSet.isEmpty())
{
//(i) pick q in NEAR
int q_indx = workingNearSet.stream().findAny().get();
workingNearSet.remove(q_indx);
TreeNode q = new TreeNode(q_indx, level-1);
//(ii) <CHILD, UNUSED> = Construct (q, SPLIT(d(q, ·), 2^(i−1),NEAR,FAR), i−1)
Set<Integer> unused = construct(parallel, q, workingNearSet, far, level-1);
//(iii) add CHILD to Children(pi)
p.addChild(q);
//(iv) let <NEW-NEAR, NEW-FAR> =SPLIT(d(p, ·), 2^i,UNUSED)
Pair<Set<Integer>, Set<Integer>> newPiar = split(parallel, p.vec_indx, pow(level), unused);
Set<Integer> newNear = newPiar.getFirstItem();
Set<Integer> newFar = newPiar.getSecondItem();
//(v) add NEW-FAR to FAR, and NEW-NEAR to NEAR.
far.addAll(newFar);
workingNearSet.addAll(newNear);
}
return far;
}
private Pair<Set<Integer>, Set<Integer>> split(boolean parallel, int p, double r, Set<Integer>... S)
{
Set<Integer> newNear = getSet(parallel);
Set<Integer> newFar = getSet(parallel);
for(Set<Integer> S_i : S)
{
int[] toRemove = ParallelUtils.streamP(S_i.stream(), parallel).mapToInt(i->
{
double d_i = dm.dist(p, i, vecs, accell_cache);
if(d_i <= r)
newNear.add(i);
else if(d_i < 2*r)
newFar.add(i);
else
return -1;//-1 will be 'removed' from the set S_i. but -1 isn't a valid index. So no impact
return i;
}).distinct().toArray();
S_i.removeAll(IntList.view(toRemove));
}
return new Pair<>(newNear, newFar);
}
private Set<Integer> getSet(boolean parallel)
{
Set<Integer> newNear;
if(parallel)
newNear = ConcurrentHashMap.newKeySet();
else
newNear = new IntSet();
return newNear;
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
@Override
public DistanceMetric getDistanceMetric()
{
return dm;
}
public void setLooseBounds(boolean looseBounds)
{
this.looseBounds = looseBounds;
}
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
neighbors.clear();
distances.clear();
this.root.findNN(range, query, dm.getQueryInfo(query), neighbors, distances, -1.0);
IndexTable it = new IndexTable(distances);
it.apply(distances);
it.apply(neighbors);
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
// if(maxDistDirty && ! looseBounds)
// {
// this.root.invalidateMaxDist();
// maxDistDirty = false;
// }
BoundedSortedList<IndexDistPair> bsl = new BoundedSortedList<>(numNeighbors);
this.root.findNN(numNeighbors, query, dm.getQueryInfo(query), bsl);
neighbors.clear();
distances.clear();
for(IndexDistPair a : bsl)
{
neighbors.add(a.getIndex());
distances.add(a.getDist());
}
}
@Override
public int size()
{
return vecs.size();
}
@Override
public V get(int indx)
{
return vecs.get(indx);
}
@Override
public CoverTree<V> clone()
{
return new CoverTree<>(this);
}
protected void simpleInsert(V x)
{
int x_indx = this.vecs.size();
this.vecs.add(x);
if(this.accell_cache == null && dm.supportsAcceleration())
this.accell_cache = new DoubleList();
if(this.accell_cache != null)
this.accell_cache.addAll(dm.getQueryInfo(x));
if(this.root == null)//start the tree
{
this.root = new TreeNode(x_indx);
// this.root.setLevel(0);
}
else//actually insert
this.root = simpleInsert(root, x_indx);
}
protected TreeNode simpleInsert(TreeNode p, int x_indx)
{
if(this.root == null)//start the tree
{
this.root = new TreeNode(x_indx);
// this.root.setLevel(0);
return this.root;
}
double p_x_dist = p.dist(x_indx);
if(p_x_dist > p.covdist())//line 1
{
/*
         * If the insertion point x has a distance that is SUPER far away,
         * the below bound may never hold. Thus, let's detect loops and short
* circuit
*/
final int start_indx = p.vec_indx;
if(p_x_dist - pow(p.level+1) < base*p.covdist())//if this is true, the condition will be true for p AND ALL CHILDREN OF P
while(p_x_dist > base*p.covdist() && !p.isLeaf())//line 2
{
//3: remove any leaf q from p
TreeNode q;
q = p.removeAnyLeaf();
//4: p' = tree with root q and p as only child
TreeNode p_prime = q;
p_prime.addChild(p);
p_prime.fixLevel();
p = p_prime;//5: p = p'
p_x_dist = p.dist(x_indx);
if(p.vec_indx == start_indx)//WE HAVE DONE THIS BEFORE
break;
}
//6: return tree with x as root and p as only child
TreeNode X = new TreeNode(x_indx);
X.addChild(p);
X.fixLevel();
return X;
}
//return insert_(p,x)
return simpleInsert_(p, x_indx);
}
/**
* prerequisites: d(p,x) ≤ covdist(p)
     * @param p the node to insert into, which must cover x
     * @param x_indx the index of the point to insert
     * @return the node p with x inserted beneath it
*/
protected TreeNode simpleInsert_(TreeNode p, int x_indx)
{
// if(nearest_ancestor)
// {
// double[] dist_to_x = new double[p.numChildren()];
// for(int i = 0; i < dist_to_x.length; i++)
// dist_to_x[i] = p.getChild(i).dist(x_indx);
// IndexTable it = new IndexTable(dist_to_x);
//
// for(int order = 0; order < p.numChildren(); order++) //Line 1:
// {
// int q_indx = it.index(order);
// TreeNode q = p.getChild(q_indx);
// if(q.dist(x_indx) <= q.covdist()) //line 2: d(q,x)≤covdist(q)
// {
// //3: q' ← insert_(q,x)
// TreeNode q_prime = simpleInsert_(q, x_indx);
// //4: p' ← p with child q replaced with q'
// p.replaceChild(q_indx, q_prime);
// //5: return p'
// return p;
// }
// }
// //6: return rebalance(p, x)
// return rebalance(p, x_indx);
// }
// else
{
for(int q_indx = 0; q_indx < p.numChildren(); q_indx++) //Line 1:
{
TreeNode q = p.getChild(q_indx);
if(q.dist(x_indx) <= q.covdist()) //line 2: d(q,x)≤covdist(q)
{
//3: q' ← insert_(q,x)
TreeNode q_prime = simpleInsert_(q, x_indx);
//4: p' ← p with child q replaced with q'
p.replaceChild(q_indx, q_prime);
//5: return p'
return p;
}
}
//6: return p with x added as a child
p.addChild(new TreeNode(x_indx, p.level-1));
return p;
}
}
@Override
public void insert(V x)
{
// maxDistDirty = true;
simpleInsert(x);
}
private class TreeNode implements Cloneable, Serializable
{
TreeNode parent = null;
int level;
int vec_indx;
DoubleList children_dists;
List<TreeNode> children;
boolean is_sorted = true;
double maxdist = -1;
public TreeNode(int vec_indx)
{
            this(vec_indx, -110);//-110 is a sentinel for a not-yet-known level; covdist() is effectively zero until fixLevel() corrects it
}
public TreeNode(int vec_indx, int level)
{
this.vec_indx = vec_indx;
this.level = level;
children = new ArrayList<>();
children_dists = new DoubleList();
}
/**
         * Copy constructor. Will not copy the parent node, but will copy all child nodes and set their parents appropriately.
* @param toCopy
*/
public TreeNode(TreeNode toCopy)
{
this.level = toCopy.level;
this.vec_indx = toCopy.vec_indx;
if(toCopy.children != null)
{
this.children = new ArrayList<>(toCopy.children.size());
this.children_dists = new DoubleList(toCopy.children_dists);
for(TreeNode childToCopy : toCopy.children)
{
TreeNode child = new TreeNode(childToCopy);
child.parent = this;
this.children.add(child);
}
}
this.is_sorted = toCopy.is_sorted;
this.maxdist = toCopy.maxdist;
}
@Override
protected TreeNode clone()
{
return new TreeNode(this);
}
public void invalidateMaxDist()
{
this.maxdist = -1;
for(TreeNode c : children)
c.invalidateMaxDist();
}
public void invalParentMaxdist()
{
this.maxdist = -2;
if(this.parent != null)
this.parent.invalParentMaxdist();
}
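        /**
         * Performs a k-nearest-neighbor search from this node using an
         * explicit stack rather than recursion. Children are expanded in
         * order of their distance to the query, and a subtree rooted at q is
         * pruned when d(q, query) - maxdist(q) can not improve on the current
         * k-th best distance.
         * @param k the number of neighbors to search for
         * @param query the query vector
         * @param x_qi the query's distance acceleration cache
         * @param knn the bounded list collecting the best neighbors found
         */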
public void findNN(int k, Vec query, List<Double> x_qi, BoundedSortedList<IndexDistPair> knn)
{
Stack<TreeNode> toEval_stack = new Stack<>();
DoubleList dist_to_q_stack = new DoubleList();
{//Quick, add root info to stack for search & prime search Q
double p_x_dist = this.dist(query, x_qi);
dist_to_q_stack.push(p_x_dist);
toEval_stack.push(this);
}
//Search loop
while(!toEval_stack.isEmpty())
{
TreeNode p = toEval_stack.pop();
double p_to_q_dist = dist_to_q_stack.pop();
knn.add(new IndexDistPair(p.vec_indx, p_to_q_dist));
double[] child_query_dist = new double[p.numChildren()];
for(int child_indx = 0; child_indx < p.numChildren(); child_indx++)//compute dists and add to knn while we are at it
{
TreeNode q = p.getChild(child_indx);
child_query_dist[child_indx] = q.dist(query, x_qi);
}
//get them in sorted order
IndexTable it = new IndexTable(child_query_dist);
                for(int i_order = it.length()-1; i_order >= 0; i_order--)//reverse order so stack goes in sorted order
                {
                    final int i = it.index(i_order);
TreeNode q = p.getChild(i);
//4: if d(y,x)>d(y,q)−maxdist(q) then
if(knn.size() < k || knn.last().getDist() > child_query_dist[i] - q.maxdist())
{//Add to the search Q
toEval_stack.push(q);
dist_to_q_stack.push(child_query_dist[i]);
}
}
}
}
        //This is the old search code; the new code (above) avoids recursion by using an explicit stack
private void findNN_recurse(int k, Vec x, List<Double> x_qi, BoundedSortedList<IndexDistPair> knn, double my_dist_to_x)
{
TreeNode p = this;
double p_x_dist;
if(my_dist_to_x < 0)
{
p_x_dist = p.dist(x, x_qi);
}
else
p_x_dist = my_dist_to_x;
knn.add(new IndexDistPair(p.vec_indx, p_x_dist));
//1: if d(p,x)<d(y,x) then, handled implicitly by knn object
// if(knn.size() < k || p_x_dist < knn.last().getDist())
// knn.add(new ProbailityMatch<V>(p_x_dist, vecs.get(p.vec_indx)));//2: y <= p
//3: for each child q of p sorted by *distance to x* do
double[] q_x_dist = new double[p.numChildren()];
for(int q_indx = 0; q_indx < p.numChildren(); q_indx++)//compute dists and add to knn while we are at it
{
TreeNode q = p.getChild(q_indx);
q_x_dist[q_indx] = q.dist(x, x_qi);
//DO NOT ADD DISTANCE TO KNN YET, we will do it recursively
                //need to avoid it so the bound check below will work properly
//and otherwise we would double count
// knn.add(new ProbailityMatch<V>(q_x_dist[q_indx], vecs.get(q.vec_indx)));
// q.findNN(k, x, x_qi, knn);
}
//get them in sorted order
IndexTable it = new IndexTable(q_x_dist);
            for(int i_order = 0; i_order < it.length(); i_order++)
            {
                final int i = it.index(i_order);
TreeNode q = p.getChild(i);
// knn.add(new ProbailityMatch<V>(q_x_dist[i], vecs.get(q.vec_indx)));
//4: if d(y,x)>d(y,q)−maxdist(q) then
// if(knn.size() < k || knn.last().getDist() > q.dist(y_vec, dm.getQueryInfo(y_vec)) - q.maxdist())
if(knn.size() < k || knn.last().getDist() > q_x_dist[i] - q.maxdist())
q.findNN_recurse(k, x, x_qi, knn, q_x_dist[i]);//Line 5:
// else if(q.isLeaf())
// {
// knn.add(new ProbailityMatch<V>(q.dist(x, x_qi), vecs.get(q.vec_indx)));
// }
}
}
public void findNN(double radius, Vec x, List<Double> x_qi, List<Integer> neighbors, List<Double> distances, double my_dist_to_x)
{
TreeNode p = this;
double p_x_dist;
if(my_dist_to_x < 0)
{
p_x_dist = p.dist(x, x_qi);
}
else
p_x_dist = my_dist_to_x;
if(p_x_dist <= radius)
{
neighbors.add(p.vec_indx);
distances.add(p_x_dist);
}
            //3: for each child q of p, no need to sort b/c radius search
double[] q_x_dist = new double[p.numChildren()];
for(int q_indx = 0; q_indx < p.numChildren(); q_indx++)//compute dists and add to knn while we are at it
{
TreeNode q = p.getChild(q_indx);
q_x_dist[q_indx] = q.dist(x, x_qi);
//DO NOT ADD DISTANCE TO KNN YET, we will do it on recursion
}
            //visit each child, pruning with the maxdist bound
for(int i = 0; i < q_x_dist.length; i++)
{
TreeNode q = p.getChild(i);
//4: if d(y,x)>d(y,q)−maxdist(q) then
if(radius > q_x_dist[i] - q.maxdist())
q.findNN(radius, x, x_qi, neighbors, distances, q_x_dist[i]);//Line 5:
}
}
public int magnitude()
{
int count = 1;
for(int i = 0; i < numChildren(); i++)
count += getChild(i).magnitude();
return count;
}
public boolean isLeaf()
{
return this.children == null || this.children.isEmpty();
}
public int numChildren()
{
return this.children.size();
}
public TreeNode getChild(int indx)
{
return this.children.get(indx);
}
public void addChild(TreeNode child)
{
// int new_level = Math.max(this.level, child.level+1);
double dist_to_c = this.dist(child.vec_indx);
// int insert_indx = Collections.binarySearch(children_dists, dist_to_c);
// if(insert_indx < 0)//no exact match, convert to insertion index
// insert_indx = -insert_indx-1;
int insert_indx = this.children.size();
this.children.add(insert_indx, child);
this.children_dists.add(insert_indx, dist_to_c);
// child.setLevel(new_level-1);
// this.setLevel(new_level);
this.fixChildrenLevel();
//        this.maxdist = -1;//no longer valid, so clear it
this.invalParentMaxdist();
}
public void replaceChild(int orig_index, TreeNode child)
{
double dist_to_c = this.dist(child.vec_indx);
this.children.set(orig_index, child);
this.children_dists.set(orig_index, dist_to_c);
// child.setLevel(new_level-1);
// this.setLevel(new_level);
//        this.maxdist = -1;//no longer valid, so clear it
this.fixChildrenLevel();
this.invalParentMaxdist();
// this.children.remove(orig_index);
// this.children_dists.removeD(orig_index);
// this.addChild(child);
// int new_level = Math.max(this.level, child.level+1);
// this.children.set(orig_index, child);
// this.children_dists.set(orig_index, this.dist(child.vec_indx));
// child.setLevel(new_level-1);
// this.setLevel(new_level);
}
public void removeChild(int orig_index)
{
this.children.remove(orig_index);
this.children_dists.remove(orig_index);
// this.fixChildrenLevel();
this.invalParentMaxdist();
}
/**
* Removes a descendant of this node that is a leaf node.
* @return the descendant that was removed
*/
public TreeNode removeAnyLeaf()
{
if(this.isLeaf())
throw new RuntimeException("BUG: node has no children to rmeove");
//lets just grab the furthest child?
TreeNode child = children.get(children.size()-1);
if(child.isLeaf())
{
child.invalParentMaxdist();
children.remove(children.size()-1);
children_dists.remove(children_dists.size()-1);
return child;
}
            else//need to remove one of the child's descendants to get a leaf
{
return child.removeAnyLeaf();
}
}
public double dist(TreeNode q)
{
return dm.dist(this.vec_indx, q.vec_indx, vecs, accell_cache);
}
public double dist(int x_indx)
{
return dm.dist(this.vec_indx, x_indx, vecs, accell_cache);
}
public double dist(Vec x, List<Double> qi)
{
return dm.dist(this.vec_indx, x, qi, vecs, accell_cache);
}
public void setLevel(int level)
{
// if(this.level == level)
// return;//levels are already set correctly
this.level = level;
// for(TreeNode q : children)
// q.setLevel(level-1);
}
public void fixLevel()
{
// double maxDist = Math.pow(1.3, -110);
double maxDist = pow(-110);
for(int i = 0; i < numChildren(); i++)
maxDist = Math.max(maxDist, this.children_dists.getD(i));
// this.level = (int) Math.ceil(Math.log(maxDist)/Math.log(1.3));
this.level = (int) Math.ceil(FastMath.log2(maxDist)/log2_base+1e-4);
fixChildrenLevel();
}
public void fixChildrenLevel()
{
for(int i = 0; i < numChildren(); i++)
{
TreeNode c = getChild(i);
if(this.level-1 != c.level)
{
c.level = this.level-1;
c.fixChildrenLevel();
}
}
}
public double covdist()
{
// return Math.pow(1.3, level);
return pow(level);
}
public double sepdist()
{
// return Math.pow(1.3, level-1);
return pow(level-1);
}
private double maxdist()
{
if(isLeaf())
return 0;
if(looseBounds)
return pow(level+1);
if (this.maxdist >= 0)
return maxdist;
//else, maxdist = -1, indicating we need to compute it
Stack<TreeNode> toGetChildrenFrom = new Stack<>();
toGetChildrenFrom.add(this);
while(!toGetChildrenFrom.empty())
{
TreeNode runner = toGetChildrenFrom.pop();
for(int q_indx = 0; q_indx < runner.numChildren(); q_indx++)
{
TreeNode q = runner.getChild(q_indx);
                    maxdist = Math.max(maxdist, this.dist(q.vec_indx));//TODO can optimize for the first set of children, we already have that
toGetChildrenFrom.add(q);
}
}
return maxdist;
}
private Iterator<TreeNode> descendants()
{
final Stack<TreeNode> toIterate = new Stack<>();
toIterate.addAll(children);
Iterator<TreeNode> iter = new Iterator<TreeNode>()
{
@Override
public boolean hasNext()
{
return !toIterate.isEmpty();
}
@Override
public TreeNode next()
{
TreeNode next = toIterate.pop();
toIterate.addAll(next.children);
return next;
}
@Override
public void remove()
{
                    throw new UnsupportedOperationException("Not supported yet.");
}
};
return iter;
}
}
}
| 30,611 | 33.865604 | 147 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/DCI.java | /*
* Copyright (C) 2019 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.PriorityQueue;
import java.util.Set;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.math.FastMath;
import jsat.utils.ArrayUtils;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
import jsat.utils.Pair;
import jsat.utils.Tuple3;
import jsat.utils.concurrent.ParallelUtils;
/**
* This class implements the Dynamic Continuous Indexing algorithm for nearest
* neighbor search in the {@link EuclideanDistance Euclidean} space only, which
* avoids doing brute force distance computations for the majority of the
* dataset, and requires limited memory. For k-NN search, DCI will return
* approximately correct nearest neighbors, but the mistaken neighbors should
* still be near the query. For radius search, DCI will return the exactly
* correct results.<br>
* <br>
* See:
* <ul>
* <li>Li, K., & Malik, J. (2017). <i>Fast k-Nearest Neighbour Search via
* Prioritized DCI</i>. In Thirty-fourth International Conference on Machine
* Learning (ICML). </li>
* <li>Li, K., & Malik, J. (2016). <i>Fast k-Nearest Neighbour Search via
* Dynamic Continuous Indexing</i>. In M. F. Balcan & K. Q. Weinberger (Eds.),
* Proceedings of The 33rd International Conference on Machine Learning (Vol.
* 48, pp. 671–679). New York, New York, USA: PMLR.</li>
* </ul>
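 * <br><br>
 * A minimal usage sketch (the list {@code data} of vectors to index is
 * assumed, and is not provided by this class):
 * <pre>{@code
 * DCI<Vec> dci = new DCI<>(15, 3);
 * dci.build(false, data, new EuclideanDistance());
 * List<Integer> neighbors = new ArrayList<>();
 * List<Double> distances = new ArrayList<>();
 * dci.search(data.get(0), 5, neighbors, distances);//approximate 5-NN of the first point
 * }</pre>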
*
* @author Edward Raff <[email protected]>
* @param <V>
*/
public class DCI<V extends Vec> implements VectorCollection<V>
{
private static final long serialVersionUID = -567002398793828933L;
private static EuclideanDistance euclid = new EuclideanDistance();
/**
* the number of simple indices that constitute a composite index
*/
private int m;
/**
* the number of composite indices
*/
private int L;
/**
* m*L random unit vectors in R^d
*/
private Vec[][] u;
/**
* m*L empty binary search trees or skip lists
*/
private NearestIterator[][] T;
private List<V> vecs;
private List<Double> cache;
private void readObject(ObjectInputStream in) throws ClassNotFoundException, IOException
{
m = in.readInt();
L = in.readInt();
int vec_count = in.readInt();
int cache_count = in.readInt();
boolean wasBuilt = in.readBoolean();
if(vec_count > 0)
{
vecs = new ArrayList<>(vec_count);
for(int i = 0; i < vec_count; i++)
vecs.add((V) in.readObject());
}
if(cache_count > 0)
{
cache = new DoubleList(cache_count);
for(int i = 0; i < cache_count; i++)
cache.add(in.readDouble());
}
if (wasBuilt)
{
u = new Vec[m][L];
T = new NearestIterator[m][L];
for(int m_i = 0; m_i < m; m_i++)
for(int L_i = 0; L_i < L; L_i++)
u[m_i][L_i] = (Vec) in.readObject();
for(int m_i = 0; m_i < m; m_i++)
for(int L_i = 0; L_i < L; L_i++)
{
double[] keys = new double[vec_count];
int[] vals = new int[vec_count];
for(int i = 0; i < vec_count; i++)
{
keys[i] = in.readDouble();
vals[i] = in.readInt();
}
T[m_i][L_i] = new NearestIterator(keys, vals);
}
}
}
private void writeObject(ObjectOutputStream out) throws IOException
{
out.writeInt(m);
out.writeInt(L);
int vec_count = vecs == null ? 0 : vecs.size();
        int cache_count = cache == null ? 0 : cache.size();
out.writeInt(vec_count);
out.writeInt(cache_count);
out.writeBoolean(T != null);
for(int i = 0; i < vec_count; i++)
out.writeObject(vecs.get(i));
for(int i = 0; i < cache_count; i++)
out.writeDouble(cache.get(i));
if (T != null)
{
for(int m_i = 0; m_i < m; m_i++)
for(int L_i = 0; L_i < L; L_i++)
out.writeObject(u[m_i][L_i]);
for(int m_i = 0; m_i < m; m_i++)
for(int L_i = 0; L_i < L; L_i++)
{
NearestIterator ni = T[m_i][L_i];
for(int i = 0; i < vec_count; i++)
{
out.writeDouble(ni.keys[i]);
out.writeInt(ni.vals[i]);
}
}
}
}
/**
* Creates a new DCI object, that should provide relatively good result
* quality.
*/
public DCI()
{
this(15, 3);
}
/**
* Creates a new DCI object, result quality depends on the number of simple
* and composite indices
*
* @param m the number of simple indices per composite index (10-15 are
* common values)
* @param L the number of composite indices (2-3 are common values)
*/
public DCI(int m, int L)
{
this.m = m;
this.L = L;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public DCI(DCI<V> toCopy)
{
this.m = toCopy.m;
this.L = toCopy.L;
if(toCopy.u != null)
{
this.u = new Vec[m][L];
this.T = new NearestIterator[m][L];
this.vecs = new ArrayList<>(toCopy.vecs);
this.cache = new DoubleList(toCopy.cache);
for(int j = 0; j < m; j++)
{
for(int l = 0; l < L; l++)
{
this.u[j][l] = toCopy.u[j][l].clone();
this.T[j][l] = toCopy.T[j][l].clone();
}
}
}
}
@Override
public void build(boolean parallel, List<V> collection, DistanceMetric dm)
{
//really just checking dm == euclidean
setDistanceMetric(dm);
this.vecs = new ArrayList<>(collection);
this.cache = euclid.getAccelerationCache(vecs, parallel);
int d = collection.get(0).length();
int n = collection.size();
//Init u
u = new Vec[m][L];
for(int j = 0; j < m; j++)
for(int l = 0; l < L; l++)
{
u[j][l] = DenseVector.random(d);
u[j][l].mutableDivide(u[j][l].pNorm(2));
}
//Init T
T = new NearestIterator[m][L];
//TODO, add more complex logic to balance parallelization over m&l loop as well as inner most loop
//Insertions
for(int j = 0; j < m; j++)
{
for(int l = 0; l < L; l++)
{
Vec u_jl = u[j][l];
double[] keys = new double[n];
int[] vals = new int[n];
ParallelUtils.run(parallel, n, (start, end)->
{
for(int i = start; i < end; i++)
{
double p_bar = vecs.get(i).dot(u_jl);
keys[i] = p_bar;
vals[i] = i;
}
});
T[j][l] = new NearestIterator(keys, vals);
}
}
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
if(!(dm instanceof EuclideanDistance))
throw new IllegalArgumentException("DCI only works for Euclidean Distance Searches");
}
@Override
public DistanceMetric getDistanceMetric()
{
return new EuclideanDistance();
}
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
int n = vecs.size();
int[][] C = new int[L][n];
double[][] q_bar = new double[m][L];
for(int j = 0; j < m; j++)
for(int l = 0; l < L; l++)
q_bar[j][l] = query.dot(u[j][l]);
List<Set<Integer>> S = new ArrayList<>();
for(int l = 0; l < L; l++)
S.add(new HashSet<>());
List<List<Iterator<Pair<Double, Integer>>>> q_iters = new ArrayList<>(m);
for(int j = 0; j < m; j++)
{
List<Iterator<Pair<Double, Integer>>> iter_m = new ArrayList<>(L);
for(int l = 0; l < L; l++)
{
iter_m.add(T[j][l].nnWalk(q_bar[j][l]));
}
q_iters.add(iter_m);
}
        //Now iterate to find indices
for(int l = 0; l < L; l++)
{
Set<Integer> S_l = S.get(l);
for(int j = 0; j < m; j++)
{
Iterator<Pair<Double, Integer>> iter_jl = q_iters.get(j).get(l);
while(iter_jl.hasNext())
{
Pair<Double, Integer> pair = iter_jl.next();
                    //the projection distance is a lower bound. If it's > range, it's definitely not a candidate
                    double dist_lower = Math.abs(pair.getFirstItem()-q_bar[j][l]);
                    if(dist_lower > range)
                        break;
//else, keep going
int indx = pair.getSecondItem();
C[l][indx]++;
if(C[l][indx] == m)//everyone agrees, you might be it
S_l.add(indx);
}
}
}
neighbors.clear();
distances.clear();
        //the projected distance is a lower bound. So if it's truly in range,
        //it must be present in all subsets
Map<Integer, Integer> unionCounter = new HashMap<>();
for(Set<Integer> S_l : S)
for(int i : S_l)
unionCounter.put(i, unionCounter.getOrDefault(i, 0)+1);
Set<Integer> candidates = new HashSet<>();
for(Map.Entry<Integer, Integer> entry : unionCounter.entrySet())
            if(entry.getValue() == S.size())//you occurred in every group? You are a candidate!
candidates.add(entry.getKey());
List<Double> qi = euclid.getQueryInfo(query);
for(int i : candidates)
{
neighbors.add(i);
distances.add(euclid.dist(i, query, qi, vecs, cache));
}
//sort by distance and remove excess
IndexTable it = new IndexTable(distances);
it.apply(neighbors);
it.apply(distances);
int maxIndx = ArrayUtils.bsIndex2Insert(Collections.binarySearch(distances, range));
neighbors.subList(maxIndx, neighbors.size()).clear();
distances.subList(maxIndx, distances.size()).clear();
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
int n = vecs.size();
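        //initial budget of candidate visits, k1 = m * k * log(n/k); it is doubled below if too few candidates turn up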
int k1 = (int) (m*numNeighbors*(FastMath.log(n)-FastMath.log(numNeighbors)));
int[][] C = new int[L][n];
double[][] q_bar = new double[m][L];
for(int j = 0; j < m; j++)
for(int l = 0; l < L; l++)
q_bar[j][l] = query.dot(u[j][l]);
List<Set<Integer>> S = new ArrayList<>();
for(int l = 0; l < L; l++)
S.add(new HashSet<>());
List<List<Iterator<Pair<Double, Integer>>>> q_iters = new ArrayList<>(m);
for(int j = 0; j < m; j++)
{
List<Iterator<Pair<Double, Integer>>> iter_m = new ArrayList<>(L);
for(int l = 0; l < L; l++)
{
iter_m.add(T[j][l].nnWalk(q_bar[j][l]));
}
q_iters.add(iter_m);
}
//Prep priority Qs
/**
* First value is the priority
* second value is the index j in [0, m) that it came from
* third value is the index i in the vector array of the point being referenced
*/
List<PriorityQueue<Tuple3<Double, Integer, Integer>>> P = new ArrayList<>();
for(int l = 0; l < L; l++)
P.add(new PriorityQueue<>((o1, o2) -> Double.compare(o1.getX(), o2.getX())));
for(int j = 0; j < m; j++)
for(int l = 0; l < L; l++)
{
Pair<Double, Integer> ph = q_iters.get(j).get(l).next();
double priority = Math.abs(ph.getFirstItem()-q_bar[j][l]);
P.get(l).add(new Tuple3<>(priority, j, ph.getSecondItem()));
}
        //Now iterate to find indices
for(int i = 0; i < k1; i++)
{
for(int l = 0; l < L; l++)
{
Set<Integer> S_l = S.get(l);
PriorityQueue<Tuple3<Double, Integer, Integer>> P_l = P.get(l);
if(S_l.size() < numNeighbors)
{
Tuple3<Double, Integer, Integer> ph = P_l.poll();
int j = ph.getY();
int h_jl = ph.getZ();
Pair<Double, Integer> next_ph = q_iters.get(j).get(l).next();
double priority = Math.abs(next_ph.getFirstItem()-q_bar[j][l]);
P.get(l).add(new Tuple3<>(priority, j, next_ph.getSecondItem()));
C[l][h_jl]++;
if(C[l][h_jl] == m)
S_l.add(h_jl);
}
}
//We haven't even found as many candidates as we have neighbors we are looking for? Up the iterations then!
if(i == k1-1 && S.stream().mapToInt(s->s.size()).min().getAsInt() < numNeighbors)
k1 *= 2;
}
neighbors.clear();
distances.clear();
Set<Integer> candidates = new HashSet<>();
for(Set<Integer> S_l : S)
candidates.addAll(S_l);
List<Double> qi = euclid.getQueryInfo(query);
for(int i : candidates)
{
neighbors.add(i);
distances.add(euclid.dist(i, query, qi, vecs, cache));
}
//sort by distance and remove excess
IndexTable it = new IndexTable(distances);
it.apply(neighbors);
it.apply(distances);
neighbors.subList(numNeighbors, neighbors.size()).clear();
distances.subList(numNeighbors, distances.size()).clear();
}
@Override
public V get(int indx)
{
return vecs.get(indx);
}
@Override
public List<Double> getAccelerationCache()
{
return cache;
}
@Override
public int size()
{
return vecs.size();
}
@Override
public DCI<V> clone()
{
return new DCI<>(this);
}
/**
     * We need to be able to store a pair of tuples <Double, Integer>, and given
     * a query double q, iterate through the points in the collection based on
     * which tuples are closest to the query. TreeMap doesn't let us do this, so
     * this custom class implements the logic in a compact manner as arrays.
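     * For example, with keys {1.0, 2.0, 5.0} and a query of q = 2.4, nnWalk
     * would yield the entries for key 2.0, then 1.0, then 5.0.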
*/
static class NearestIterator implements Serializable
{
public double[] keys;
public int[] vals;
public NearestIterator(double[] keys, int[] vals)
{
this.keys = keys;
this.vals = vals;
if(keys.length != vals.length)
throw new IllegalArgumentException("Keys and vales should have the same length");
IndexTable it = new IndexTable(keys);
it.apply(keys);
it.apply(vals);
}
public NearestIterator(NearestIterator toCopy)
{
this.keys = Arrays.copyOf(toCopy.keys, toCopy.keys.length);
this.vals = Arrays.copyOf(toCopy.vals, toCopy.vals.length);
}
public NearestIterator()
{
}
@Override
protected NearestIterator clone()
{
return new NearestIterator(this);
}
public Iterator<Pair<Double, Integer>> nnWalk(double q)
{
return new Iterator<Pair<Double, Integer>>()
{
int upper = ArrayUtils.bsIndex2Insert(Arrays.binarySearch(keys, q));
//upper is now the lowest index of a point that is >= q
int lower = upper-1;
@Override
public boolean hasNext()
{
return lower >= 0 || upper < keys.length;
}
@Override
public Pair<Double, Integer> next()
{
Pair<Double, Integer> toRet = null;
if (lower < 0 && upper >= keys.length)
{
throw new NoSuchElementException();
}
else if (lower < 0)//upper is only option
{
toRet = new Pair<>(keys[upper], vals[upper]);
upper++;
}
else if (upper >= keys.length)//lower is only options
{
toRet = new Pair<>(keys[lower], vals[lower]);
lower--;
}
else if (Math.abs(keys[upper] - q) < Math.abs(keys[lower] - q))
{//upper is closer to q, so return that
toRet = new Pair<>(keys[upper], vals[upper]);
upper++;
}
else//lower must be closer
{
toRet = new Pair<>(keys[lower], vals[lower]);
lower--;
}
return toRet;
}
};
}
}
}
| 15,535 | 25.110924 | 112 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/DefaultVectorCollection.java | /*
* Copyright (C) 2018 edwardraff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
import java.util.List;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
/**
* This class is a generic wrapper for the Vector Collection objects within
* JSAT. It will attempt to select a good choice for any given dataset and
* distance metric combination at runtime.
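 * <br><br>
 * A minimal usage sketch (assuming a pre-existing list {@code vecs} of
 * vectors to index):
 * <pre>{@code
 * VectorCollection<Vec> vc = new DefaultVectorCollection<>(new EuclideanDistance(), vecs);
 * List<Integer> neighbors = new ArrayList<>();
 * List<Double> distances = new ArrayList<>();
 * vc.search(vecs.get(0), 3, neighbors, distances);//3 nearest neighbors of the first point
 * }</pre>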
*
* @author Edward Raff
*/
public class DefaultVectorCollection<V extends Vec> implements VectorCollection<V>
{
private DistanceMetric dm;
VectorCollection<V> base;
public DefaultVectorCollection()
{
this(new EuclideanDistance());
}
public DefaultVectorCollection(DistanceMetric dm)
{
setDistanceMetric(dm);
}
public DefaultVectorCollection(DistanceMetric dm, List<V> vecs)
{
this(dm, vecs, false);
}
public DefaultVectorCollection(DistanceMetric dm, List<V> vecs, boolean parallel)
{
setDistanceMetric(dm);
build(parallel, vecs, dm);
}
public DefaultVectorCollection(DefaultVectorCollection toCopy)
{
this.dm = toCopy.dm.clone();
if (toCopy.base != null)
this.base = toCopy.base.clone();
}
@Override
public void build(boolean parallel, List<V> collection, DistanceMetric dm)
{
int N = collection.size();
if(N <= 20 || !dm.isValidMetric())
base = new VectorArray<>();
else
base = new VPTreeMV<>();
base.build(parallel, collection, dm);
}
@Override
public List<Double> getAccelerationCache()
{
return base.getAccelerationCache();
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
@Override
public DistanceMetric getDistanceMetric()
{
return dm;
}
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
base.search(query, range, neighbors, distances);
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
base.search(query, numNeighbors, neighbors, distances);
}
@Override
public V get(int indx)
{
return base.get(indx);
}
@Override
public int size()
{
return base.size();
}
@Override
public DefaultVectorCollection<V> clone()
{
return new DefaultVectorCollection<>(this);
}
}
| 3,225 | 25.016129 | 100 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/DualTree.java | /*
* Copyright (C) 2018 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
import static java.lang.Math.*;
import java.util.ArrayList;
import java.util.List;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.utils.BoundedSortedList;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
import jsat.utils.IntList;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;
import java.util.function.BiFunction;
import jsat.utils.ListUtils;
/**
*
* @author Edward Raff
* @param <V>
*/
public interface DualTree<V extends Vec> extends VectorCollection<V>
{
public IndexNode getRoot();
@Override
public DualTree<V> clone();
default public double dist(int self_index, int other_index, DualTree<V> other)
{
        return getDistanceMetric().dist(this.get(self_index), other.get(other_index));
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances);
@Override
default public void search(VectorCollection<V> VC, int numNeighbors, List<List<Integer>> neighbors, List<List<Double>> distances, boolean parallel )
{
if(!(VC instanceof DualTree))
{
VectorCollection.super.search(VC, numNeighbors, neighbors, distances, parallel);
return;
}
DualTree<V> Q = (DualTree<V>) VC;
        //Map each node to a cached value. This is used for recursive bound updates
Map<IndexNode, Double> query_B_cache = parallel ? new ConcurrentHashMap<>(Q.size()) : new IdentityHashMap<>(Q.size());
//For each item in Q, we want to find its nearest neighbor in THIS collection.
//each item in Q gets a priority queue of k-nns
List<BoundedSortedList<IndexDistPair>> allPriorities = new ArrayList<>();
for(int i = 0; i < Q.size(); i++)
allPriorities.add(new BoundedSortedList<>(numNeighbors));
        //For simplicity and fast calculations, let's combine the acceleration caches into one view
final List<Double> this_cache = this.getAccelerationCache();
final List<Double> other_cache = Q.getAccelerationCache();
final int N_r = this.size();
final List<Double> wholeCache = this_cache == null ? null : new DoubleList(ListUtils.mergedView(this_cache, other_cache));
final List<Vec> allVecs = new ArrayList<>(N_r+Q.size());
for(int i = 0; i < N_r; i++)
allVecs.add(this.get(i));
for(int i = 0; i < Q.size(); i++)
allVecs.add(Q.get(i));
DistanceMetric dm = getDistanceMetric();
BaseCaseDT base;
if(!parallel)//easy serial case
base = (int r_indx, int q_indx) ->
{
double d = dm.dist(r_indx, N_r+q_indx, allVecs, wholeCache);
allPriorities.get(q_indx).add(new IndexDistPair(r_indx, d));
return d;
};
else//slightly more complicated parallel case
base = (int r_indx, int q_indx) ->
{
double d = dm.dist(r_indx, N_r + q_indx, allVecs, wholeCache);
BoundedSortedList<IndexDistPair> target = allPriorities.get(q_indx);
synchronized (target)
{
target.add(new IndexDistPair(r_indx, d));
}
return d;
};
ScoreDTLazy score = (IndexNode ref, IndexNode query, double origScore) ->
{
if(origScore < 0)
return ref.minNodeDistance(query);
double bound_final = computeKnnBound(query, numNeighbors, allPriorities, query_B_cache);
// System.out.println(bound_final);
final double d_min_b = origScore;
if(Double.isFinite(bound_final))
{
if(d_min_b > bound_final)//YAY we can prune!
return Double.NaN;
}
//default case, don't prune
return d_min_b;
};
traverse(Q, base, score, true, parallel);
neighbors.clear();
distances.clear();
for(int i = 0; i < Q.size(); i++)
{
IntList n = new IntList(numNeighbors);
DoubleList d = new DoubleList(numNeighbors);
BoundedSortedList<IndexDistPair> knn = allPriorities.get(i);
for(int j = 0; j < knn.size(); j++)
{
IndexDistPair ip = knn.get(j);
n.add(ip.getIndex());
d.add(ip.getDist());
}
neighbors.add(n);
distances.add(d);
}
}
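    /**
     * Computes a pruning bound B(N_q) for the given query node during a
     * dual-tree k-NN search, taken as the minimum of several valid bounds:
     * the worst current k-th neighbor distance over everything owned by or
     * below this node (B_1), the best k-th neighbor distance of an owned
     * point plus the node's point and descendant radii (B_2), a bound
     * propagated up from the children (B_3), and the parent's cached bound
     * (B_4). The result is cached per node for reuse.
     * @param query the query node to bound
     * @param numNeighbors the number of neighbors being searched for
     * @param allPriorities the current neighbor candidate list of every query point
     * @param query_B_cache the cache of previously computed bounds for each node
     * @return an upper bound on the distance that could still improve the
     * neighbors of any point owned by or below the query node
     */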
default public double computeKnnBound(IndexNode query, int numNeighbors, List<BoundedSortedList<IndexDistPair>> allPriorities, Map<IndexNode, Double> query_B_cache)
{
double lambda_q = query.furthestDescendantDistance();
double bound_1 = Double.NEGATIVE_INFINITY;
//bound3 will re-use loop of bound_1 calc
double bound_3 = Double.POSITIVE_INFINITY;
for(int c = 0; c < query.numChildren(); c++)
{
IndexNode n_c = query.getChild(c);
double B_nc = query_B_cache.getOrDefault(n_c, Double.POSITIVE_INFINITY);
bound_1 = max(bound_1, B_nc);
bound_3 = min(bound_3, B_nc + 2*max(0, lambda_q-n_c.furthestDescendantDistance()));
}
//bound 1 & 3 loop over points, lets do bound 2 during same loop
///compute bound 2i. First set to infinity, and find min portion
double bound_2i = Double.POSITIVE_INFINITY;
for(int p = 0; p < query.numPoints(); p++)
{
BoundedSortedList<IndexDistPair> D_p = allPriorities.get(query.getPoint(p));
synchronized(D_p)
{
if(D_p.size() == numNeighbors)
{
double d = D_p.last().dist;
bound_2i = min(bound_2i, d);
bound_1 = max(bound_1, d);
}
else//can't bound B_1
{
bound_1 = Double.POSITIVE_INFINITY;
}
}
}
if(Double.isInfinite(bound_1))//cant bound
bound_1 = Double.POSITIVE_INFINITY;
//then add the remaining 2 terms, which are constant for a given Node Q. If no valid points, bound remains infinite
bound_2i += query.furthestPointDistance() + lambda_q;
//Compute 3rd bound
IndexNode q_parrent = query.getParrent();
// System.out.println(bound_3);
double bound_4 = q_parrent == null ? Double.POSITIVE_INFINITY : query_B_cache.getOrDefault(q_parrent, Double.POSITIVE_INFINITY);
final double bound_final = min(min(bound_1, bound_2i), min(bound_3, bound_4));
// final double bound_final = min(min(bound_1, bound_4), bound_2i);
//update cache with min value
query_B_cache.put(query, bound_final);
// return Double.MAX_VALUE;
return bound_final;
}
@Override
default public void search(VectorCollection<V> VC, double r_min, double r_max, List<List<Integer>> neighbors, List<List<Double>> distances, boolean parallel )
{
if(!(VC instanceof DualTree))
{
VectorCollection.super.search(VC, r_min, r_max, neighbors, distances, parallel);
return;
}
DualTree<V> Q = (DualTree<V>) VC;
neighbors.clear();
distances.clear();
for(int i = 0; i < Q.size(); i++)
{
neighbors.add(new IntList());
distances.add(new DoubleList());
}
        //For simplicity and fast calculations, let's combine the acceleration caches into one view
final List<Double> this_cache = this.getAccelerationCache();
final List<Double> other_cache = Q.getAccelerationCache();
final int N_r = this.size();
final List<Double> wholeCache = this_cache == null ? null : ListUtils.mergedView(this_cache, other_cache);
final List<Vec> allVecs = new ArrayList<>(N_r+Q.size());
for(int i = 0; i < N_r; i++)
allVecs.add(this.get(i));
for(int i = 0; i < Q.size(); i++)
allVecs.add(Q.get(i));
DistanceMetric dm = getDistanceMetric();
BaseCaseDT base = (int r_indx, int q_indx) ->
{
double d = dm.dist(r_indx, N_r+q_indx, allVecs, wholeCache);
if(r_min <= d && d <= r_max)
{
synchronized(neighbors.get(q_indx))
{
neighbors.get(q_indx).add(r_indx);
distances.get(q_indx).add(d);
}
}
return d;
};
ScoreDT score = (IndexNode ref, IndexNode query) ->
{
double[] minMax = ref.minMaxDistance(query);
double d_min = minMax[0];
double d_max = minMax[1];
            if(d_min > r_max || d_max < r_min)//If the min distance is greater than the max range, or the max distance is less than the min range, we can prune
                return Double.NaN;
            if(r_min < d_min && d_max < r_max)//Bound says ALL DESCENDANTS BELONG, so let's add them all!
{
IntList r_dec = new IntList();
for(Iterator<Integer> iter = ref.DescendantIterator(); iter.hasNext(); )
r_dec.add(iter.next());
IntList q_dec = new IntList();
for(Iterator<Integer> iter = query.DescendantIterator(); iter.hasNext(); )
q_dec.add(iter.next());
for(int i : r_dec)
{
for(int j : q_dec)
{
double d = dm.dist(i, N_r+j, allVecs, wholeCache);
synchronized(neighbors.get(j))
{
neighbors.get(j).add(i);
distances.get(j).add(d);
}
}
}
//Return NaN so that search stops, we added everyone!
return Double.NaN;
}
return d_min;
};
        //Range search doesn't benefit from improved search order. So use the basic one and avoid extra overhead
traverse(Q, base, score, false, parallel);
//Now lets sort the returned lists
for(int i = 0; i < neighbors.size(); i++)
{
IndexTable it = new IndexTable(distances.get(i));
it.apply(distances.get(i));
it.apply(neighbors.get(i));
}
}
default void traverse(DualTree<V> Q, BaseCaseDT base, ScoreDT score, boolean improvedTraverse, boolean parallel)
{
IndexNode R_root = this.getRoot(), Q_root = Q.getRoot();
        if(!this.getRoot().allPointsInLeaves())//wrap the roots so that we can use the same traversal for all implementations
{
R_root = new SelfAsChildNode<>(this.getRoot());
Q_root = new SelfAsChildNode<>(Q.getRoot());
}
if(parallel)
ForkJoinPool.commonPool().invoke(new DualTreeTraversalAction(R_root, Q_root, base, score, improvedTraverse));
else
dual_depth_first(R_root, Q_root, base, score, improvedTraverse);
}
/**
* This class is used as a helper class to deal with Dual Trees which may
* contain points in branching nodes. The dual tree traversal assumes all
     * points belong in leaf nodes. This fixes that by wrapping an IndexNode to
* behave as if all points owned within a branch really belong to a special
* extra "self" child.
*
* @param <N>
*/
class SelfAsChildNode<N extends IndexNode<N>> implements IndexNode<SelfAsChildNode<N>>
{
public boolean asLeaf;
N wrapping;
public SelfAsChildNode(N wrapping)
{
this.wrapping = wrapping;
asLeaf = !wrapping.hasChildren();
}
public SelfAsChildNode(boolean asLeaf, N wrapping)
{
this.asLeaf = asLeaf;
this.wrapping = wrapping;
}
@Override
public double furthestPointDistance()
{
            if(!asLeaf)//Not acting as a leaf, so this node owns no points directly
return 0;
//else, return the answer
return wrapping.furthestPointDistance();
}
@Override
public double furthestDescendantDistance()
{
if(asLeaf)
return wrapping.furthestPointDistance();
else
return wrapping.furthestDescendantDistance();
}
@Override
public int numChildren()
{
if(asLeaf)
return 0;
else
return wrapping.numChildren() + 1;//+1 for self child
}
@Override
public IndexNode getChild(int indx)
{
if(indx == wrapping.numChildren())
return new SelfAsChildNode(true, wrapping);
//else, return base children
return new SelfAsChildNode(wrapping.getChild(indx));
}
@Override
public Vec getVec(int indx)
{
return wrapping.getVec(indx);
}
@Override
public int numPoints()
{
if(asLeaf)
return wrapping.numPoints();
else
return 0;
}
@Override
public int getPoint(int indx)
{
if(asLeaf)
return wrapping.getPoint(indx);
            else//only nodes acting as leaves own points in this wrapper
                throw new IndexOutOfBoundsException("Non-leaf node does not own any points");
}
@Override
public SelfAsChildNode<N> getParrent()
{
if(asLeaf)
                if(wrapping.hasChildren())//we are a branch node acting as a leaf, so our parent is our non-leaf self
                    return new SelfAsChildNode<>(false, wrapping);
            //we are a true leaf node, so the parent is just the wrapped node's parent,
            // OR
            //we are not acting as a leaf, and the parent is again the wrapped node's parent
            N parent = wrapping.getParrent();
            if(parent == null)
                return null;
            return new SelfAsChildNode<>(false, parent);
}
@Override
public double minNodeDistance(SelfAsChildNode<N> other)
{
return wrapping.minNodeDistance(other.wrapping);
}
@Override
public double maxNodeDistance(SelfAsChildNode<N> other)
{
return wrapping.maxNodeDistance(other.wrapping);
}
@Override
public double minNodeDistance(int other)
{
return wrapping.minNodeDistance(other);
}
@Override
public boolean equals(Object obj)
{
if(obj instanceof SelfAsChildNode)
{
SelfAsChildNode other = (SelfAsChildNode) obj;
if(this.asLeaf == other.asLeaf)
return this.wrapping.equals(other.wrapping);
}
return false;
}
@Override
public int hashCode()
{
int hash = 5;
hash = 71 * hash + (this.asLeaf ? 1 : 0);
hash = 71 * hash + this.wrapping.hashCode();
return hash;
}
@Override
public double[] minMaxDistance(SelfAsChildNode<N> other)
{
return wrapping.minMaxDistance(other.wrapping);
}
}
    static final double COMP_SCORE = -1;//sentinel "original score" value indicating the score must be computed fresh
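    /**
     * Performs a depth-first dual-tree traversal starting from the given pair
     * of reference and query nodes. The base case is run for every pair of
     * points owned by the two nodes, then child node combinations are scored,
     * pruned when the score comes back NaN, and recursed into in priority
     * order. When {@code improvedSearch} is true, the delayed
     * reference-expansion variant is used, which can avoid splitting the
     * reference node when all of its children receive the same score.
     * @param n_r the root of the reference tree
     * @param n_q the root of the query tree
     * @param base the work to perform on pairs of individual points
     * @param score the function that scores, and may prune, node pairs
     * @param improvedSearch whether to use the improved traversal ordering
     */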
public static void dual_depth_first(IndexNode n_r, IndexNode n_q, BaseCaseDT base, ScoreDT score, boolean improvedSearch)
{
//Algo 10 in Thesis
//3: {Perform base cases for points in node combination.}
for(int i = 0; i < n_r.numPoints(); i++)
for(int j = 0; j < n_q.numPoints(); j++)
base.base_case(n_r.getPoint(i), n_q.getPoint(j));
//7: {Assemble list of combinations to recurse into.}
//8: q←empty priority queue
PriorityQueue<IndexTuple> q = new PriorityQueue<>();
        //9: if N_q and N_r both have children then
if(n_q.hasChildren() && n_r.hasChildren())
{
            //the Algorithm 10 version. Simpler but not as efficient
if(!improvedSearch)
{
for(int i = 0; i < n_r.numChildren(); i++)
for(int j = 0; j < n_q.numChildren(); j++)
{
IndexNode n_r_i = n_r.getChild(i);
IndexNode n_q_j = n_q.getChild(j);
double s = score.score(n_r_i, n_q_j, COMP_SCORE);
if(!Double.isNaN(s))
q.offer(new IndexTuple(n_r_i, n_q_j, s));
}
}
else //Below is the Algo 13 version.
{
for(int c = 0; c < n_q.numChildren(); c++)
{
IndexNode n_q_c = n_q.getChild(c);
List<IndexTuple> q_qc =new ArrayList<>();
boolean all_scores_same = true;
for(int i = 0; i < n_r.numChildren(); i++)
{
IndexNode n_r_i = n_r.getChild(i);
double s = score.score(n_r_i, n_q_c, COMP_SCORE);
//check if all scores have the same value
if(i > 0 && abs(q_qc.get(i-1).priority-s) < 1e-13)
all_scores_same = false;
q_qc.add(new IndexTuple(n_r_i, n_q_c, s));
}
if(all_scores_same && q_qc.get(0).priority > 0)
{
double s = score.score(n_r, n_q_c, COMP_SCORE);
if(s > q_qc.get(0).priority)
q.offer(new IndexTuple(n_r, n_q_c, s));
else
q.addAll(q_qc);
}
else
q.addAll(q_qc);
}
}
}
        else if(n_q.hasChildren()) //implicitly n_r has no children if this check passes
{
for(int j = 0; j < n_q.numChildren(); j++)
{
IndexNode n_q_j = n_q.getChild(j);
double s = score.score(n_r, n_q_j, COMP_SCORE);
if (!Double.isNaN(s))
q.offer(new IndexTuple(n_r, n_q_j, s));
}
}
else if(n_r.hasChildren())// implicitly n_q has no children if this check passes
{
for (int i = 0; i < n_r.numChildren(); i++)
{
IndexNode n_r_i = n_r.getChild(i);
double s = score.score(n_r_i, n_q, COMP_SCORE);
if (!Double.isNaN(s))
q.offer(new IndexTuple(n_r_i, n_q, s));
}
}
        //22: {Recurse into combinations with highest priority first.}
while(!q.isEmpty())
{
IndexTuple toProccess = q.poll();
// System.out.println(toProccess.priority);
if(score instanceof ScoreDTLazy)//re-compute the score before we just go in
{
double s = score.score(toProccess.a, toProccess.b, toProccess.priority);
if(Double.isNaN(s))//We might have a pruning op now
{
continue;//Good job!
}
}
dual_depth_first(toProccess.a, toProccess.b, base, score, improvedSearch);
}
}
class DualTreeTraversalAction extends RecursiveAction implements Comparable<DualTreeTraversalAction>
{
IndexNode n_r;
IndexNode n_q;
BaseCaseDT base;
ScoreDT score;
boolean improvedSearch;
double priority;
public DualTreeTraversalAction(IndexNode n_r, IndexNode n_q, BaseCaseDT base, ScoreDT score, boolean improvedSearch)
{
this(n_r, n_q, base, score, improvedSearch, 0.0);
}
public DualTreeTraversalAction(IndexNode n_r, IndexNode n_q, BaseCaseDT base, ScoreDT score, boolean improvedSearch, double priority)
{
this.n_r = n_r;
this.n_q = n_q;
this.base = base;
this.score = score;
this.improvedSearch = improvedSearch;
this.priority = priority;
}
@Override
protected void compute()
{
            /*
             * B/c of the fork-join framework, we can't do the ScoreDTLazy
             * check before placing pairs into the execution queue. So we will
             * do the check now, upon ourselves. We can do that b/c priority
             * is the score for the pair of IndexNodes we are about to process!
             */
if(score instanceof ScoreDTLazy)//re-compute the score before we do work
{
double s = score.score(n_r, n_q, priority);
if(Double.isNaN(s))//We might have a pruning op now
return;//Good job! No more work to do
}
//Algo 10 in Thesis
//3: {Perform base cases for points in node combination.}
for(int i = 0; i < n_r.numPoints(); i++)
for(int j = 0; j < n_q.numPoints(); j++)
base.base_case(n_r.getPoint(i), n_q.getPoint(j));
//7: {Assemble list of combinations to recurse into.}
//8: q←empty priority queue
PriorityQueue<DualTreeTraversalAction> q = new PriorityQueue<>();
            //9: if N_q and N_r both have children then
if(n_q.hasChildren() && n_r.hasChildren())
{
                //the Algorithm 10 version. Simpler but not as efficient
if(!improvedSearch)
{
for(int i = 0; i < n_r.numChildren(); i++)
for(int j = 0; j < n_q.numChildren(); j++)
{
IndexNode n_r_i = n_r.getChild(i);
IndexNode n_q_j = n_q.getChild(j);
double s = score.score(n_r_i, n_q_j, COMP_SCORE);
if(!Double.isNaN(s))
q.offer(new DualTreeTraversalAction(n_r_i, n_q_j, base, score, improvedSearch, s));
}
}
else //Below is the Algo 13 version.
{
for(int c = 0; c < n_q.numChildren(); c++)
{
IndexNode n_q_c = n_q.getChild(c);
List<DualTreeTraversalAction> q_qc =new ArrayList<>();
boolean all_scores_same = true;
for(int i = 0; i < n_r.numChildren(); i++)
{
IndexNode n_r_i = n_r.getChild(i);
double s = score.score(n_r_i, n_q_c, COMP_SCORE);
                        //check if any score differs from the previous one; if so, they are not all the same
                        if(i > 0 && abs(q_qc.get(i-1).priority-s) > 1e-13)
                            all_scores_same = false;
q_qc.add(new DualTreeTraversalAction(n_r_i, n_q_c, base, score, improvedSearch, s));
}
if(all_scores_same)
{
double s = score.score(n_r, n_q_c, COMP_SCORE);
if(s > q_qc.get(0).priority)
q.offer(new DualTreeTraversalAction(n_r, n_q_c, base, score, improvedSearch, s));
else
q.addAll(q_qc);
}
else
q.addAll(q_qc);
}
}
}
            else if(n_q.hasChildren()) //implicitly n_r has no children if this check passes
{
for(int j = 0; j < n_q.numChildren(); j++)
{
IndexNode n_q_j = n_q.getChild(j);
double s = score.score(n_r, n_q_j, COMP_SCORE);
if (!Double.isNaN(s))
q.offer(new DualTreeTraversalAction(n_r, n_q_j, base, score, improvedSearch, s));
}
}
else if(n_r.hasChildren())// implicitly n_q has no children if this check passes
{
for (int i = 0; i < n_r.numChildren(); i++)
{
IndexNode n_r_i = n_r.getChild(i);
double s = score.score(n_r_i, n_q, COMP_SCORE);
if (!Double.isNaN(s))
q.offer(new DualTreeTraversalAction(n_r_i, n_q, base, score, improvedSearch, s));
}
}
            //22: {Recurse into combinations with highest priority first.}
invokeAll(q);
}
@Override
public int compareTo(DualTreeTraversalAction o)
{
return Double.compare(this.priority, o.priority);
}
}
}
| 26,555 | 36.350211 | 168 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/IncrementalCollection.java | /*
* Copyright (C) 2017 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
import jsat.linear.Vec;
/**
* This interface is for Vector Collections that support incremental
* construction. If all data is available at the onset, it is recommended to use
* the appropriate constructor / bulk insertion as they may be more compute
* efficient or produce better indexes. The incremental insertion of points is
* not guaranteed to result in a collection that is equally as performant in
* either construction or querying. However, it does allow for additions to the
* collection without needing to re-build the entire collection. Efficiency and
* performance of incremental additions will depend on the base implementation.
*
* @author Edward Raff
* @param <V> The type of vectors stored in this collection
*/
public interface IncrementalCollection<V extends Vec> extends VectorCollection<V>
{
/**
* Incrementally adds the given datapoint into the collection
* @param x the vector to add to the collection
*/
public void insert(V x);
@Override
public IncrementalCollection<V> clone();
}
| 1,781 | 38.6 | 81 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/IndexDistPair.java | /*
* Copyright (C) 2017 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
/**
* This class exists as a helper method for use with nearest neighbor
* implementations. It stores an integer to represent the index of a vector, and
* a double to store the distance of the index to a given query.
*
* @author Edward Raff
*/
public class IndexDistPair implements Comparable<IndexDistPair>
{
/**
* the index of a vector
*/
protected int indx;
/**
* the distance of this index to a query vector
*/
protected double dist;
public IndexDistPair(int indx, double dist)
{
this.indx = indx;
this.dist = dist;
}
public int getIndex()
{
return indx;
}
public void setIndex(int indx)
{
this.indx = indx;
}
public double getDist()
{
return dist;
}
public void setDist(double dist)
{
this.dist = dist;
}
@Override
public int compareTo(IndexDistPair o)
{
return Double.compare(this.dist, o.dist);
}
}
| 1,737 | 23.478873 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/IndexNode.java | /*
* Copyright (C) 2018 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
import java.util.ArrayList;
import java.util.List;
import java.util.PriorityQueue;
import jsat.linear.Vec;
import static java.lang.Math.*;
import java.util.*;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;
/**
*
* @author Edward Raff
* @param <N>
*/
public interface IndexNode<N extends IndexNode>
{
/**
*
     * @return the parent node of this one, or {@code null} if this node is the root.
*/
public N getParrent();
/**
* This method returns a lower bound on the minimum distance from a point in
     * or owned by this node to any point in or owned by the {@code other} node.
     * Because the value returned is a lower bound, 0 would always be a valid
* return value.
*
* @param other the other node of points to get the minimum distance to
* @return a lower bound on the minimum distance.
*/
public double minNodeDistance(N other);
public double maxNodeDistance(N other);
/**
*
     * @param other the other node to compute the distance bounds against
     * @return an array where the first value is the minimum distance between the nodes, and the second value is the maximum
public default double[] minMaxDistance(N other)
{
return new double[]{minNodeDistance(other), maxNodeDistance(other)};
}
/**
* This method returns a lower bound on the minimum distance from a point in
* or owned by this node to the {@code other} point.
     * Because the value returned is a lower bound, 0 would always be a valid
* return value.
     * @param other the index of the other point
     * @return a lower bound on the minimum distance from this node to the given point
*/
public double minNodeDistance(int other);
/**
     * Gets the distance from this node (or its centroid) to its parent node (or centroid).
     * @return the distance from this node to its parent
*/
public default double getParentDistance()
{
N parent = getParrent();
if(parent == null)
return 0;//You have no parent
else
return parent.furthestDescendantDistance();//stupid loose default bound
}
/**
* Returns an upper bound on the farthest distance from this node to any of the points it owns. <br>
* <br>
* In the Dual Tree papers, this is often given as ρ(N_i)
* @return an upper bound on the distance
*/
public double furthestPointDistance();
/**
* Returns an upper bound on the farthest distance from this node to any of
* the points it owns or its children own. <br>
* <br> In the Dual Tree papers,
* this is often given as λ(Ni)
* @return an upper bound on the distance
*/
public double furthestDescendantDistance();
    /**
     * @return the number of child nodes directly beneath this node
     */
public int numChildren();
public IndexNode getChild(int indx);
public Vec getVec(int indx);
public int numPoints();
public int getPoint(int indx);
public default boolean hasChildren()
{
return numChildren() > 0;
}
default public boolean allPointsInLeaves()
{
return true;
}
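    /**
     * Iterates over the indices of every point owned by this node or by any
     * of its descendants, visiting nodes in a depth-first order.
     * @return an iterator over the indices of all descendant points
     */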
default public Iterator<Integer> DescendantIterator()
{
Stack<IndexNode<N>> toProcess = new Stack<>();
toProcess.add(this);
return new Iterator<Integer>()
{
int curPointPos = 0;
boolean primed = false;
@Override
public boolean hasNext()
{
do
{
if(toProcess.isEmpty())
{
return false;
}
                    else if(toProcess.peek().numPoints() <= curPointPos)//we have exhausted this node's points, expand the search
{
IndexNode tmp = toProcess.pop();
for(int i = 0; i < tmp.numChildren(); i++)
toProcess.add(tmp.getChild(i));
curPointPos = 0;
}
else//we have points that have not been iterated on the stack
return (primed = true);
}
while(!toProcess.isEmpty());
return false;
}
@Override
public Integer next()
{
if(!primed)//call hasNext to get the structures in place and ready
if(!hasNext())
throw new NoSuchElementException();
primed = false;
return toProcess.peek().getPoint(curPointPos++);
}
};
}
}
| 5,344 | 28.860335 | 117 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/IndexTuple.java | /*
* Copyright (C) 2018 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
/**
*
* @author Edward Raff
*/
public class IndexTuple implements Comparable<IndexTuple>
{
public IndexNode a;
public IndexNode b;
double priority;
public IndexTuple(IndexNode a, IndexNode b, double priority)
{
this.a = a;
this.b = b;
this.priority = priority;
}
@Override
public int compareTo(IndexTuple o)
{
return Double.compare(this.priority, o.priority);
}
}
| 1,186 | 24.804348 | 72 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/KDTree.java |
package jsat.linear.vectorcollection;
import java.io.Serializable;
import java.util.*;
import java.util.concurrent.ExecutorService;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.*;
import jsat.math.FastMath;
import jsat.math.OnLineStatistics;
import jsat.utils.*;
import jsat.utils.concurrent.ParallelUtils;
/**
* Standard KDTree implementation. KDTrees are fast to create with no distance computations needed.
* Though KDTrees can be constructed in O(n) time, this
* implementation is O(n log n). KDTrees can be very fast for low dimensional
 * data queries, but degrade as the dimension increases. For very high
 * dimensions or pathologically bad data, O(n<sup>2</sup>) performance worse
 * than {@link VectorArray} can occur.
* <br>
* <br>
* Note: KD trees are only usable with Distance Metrics based off of the pNorm
* between two vectors. The valid distance metrics are
* {@link EuclideanDistance}, {@link ChebyshevDistance}, {@link ManhattanDistance}, {@link MinkowskiDistance}<br>
* <br>
* See:
* <ul>
* <li>Bentley, J. L. (1975). Multidimensional Binary Search Trees Used for
* Associative Searching. Commun. ACM, 18(9), 509–517.
* http://doi.org/10.1145/361002.361007</li>
* <li>Moore, A. (1991). A tutorial on kd-trees (No. Technical Report No.
* 209).</li>
* </ul>
*
* @author Edward Raff
* @param <V> The vector type
*/
public class KDTree<V extends Vec> implements IncrementalCollection<V>
{
private static final long serialVersionUID = -7401342201406776463L;
private DistanceMetric distanceMetric;
private KDNode root;
private PivotSelection pvSelection;
private int size;
private int leaf_node_size = 20;
private List<V> allVecs;
private List<Double> distCache;
/**
     * KDTree uses one dimension of the vectors at each stage as the pivot,
     * dividing the remaining elements into two sets. These values control the
     * method used to determine the pivot dimension at each step.
*/
public enum PivotSelection
{
/**
* The next pivot will be selected by iteratively going through each possible pivot.
* This method has no additional overhead.
*/
INCREMENTAL,
/**
         * The next pivot will be selected by determining which dimension has the most variance.
         * This method requires additional O(n d) work per step, where n is the number of data points
         * being split, and d is the dimension of the data set.
*/
VARIANCE,
/**
* The next pivot dimension will be selected as the dimension with the
* maximum spread, with the value selected as the point closest to the
* median value of the spread (i.e., the medoid)
* See: Moore, A. (1991). A tutorial on kd-trees (No. Technical Report
* No. 209).
*/
SPREAD_MEDOID,
}
/**
* Creates a new KDTree with the given data and methods.
*
* @param vecs the list of vectors to place in this structure
* @param distanceMetric the metric to use for the space
* @param pvSelection the method of selection to use for determining what
* pivot to use.
* @param parallel {@code true} if multiple threads should be used for
* construction, {@code false} otherwise.
*/
public KDTree(List<V> vecs, DistanceMetric distanceMetric, PivotSelection pvSelection, boolean parallel)
{
this.distanceMetric = distanceMetric;
this.pvSelection = pvSelection;
build(parallel, vecs, distanceMetric);
}
/**
* Creates a new KDTree with the given data and methods.
*
* @param vecs the list of vectors to place in this structure
* @param distanceMetric the metric to use for the space
* @param pvSelection the method of selection to use for determining what pivot to use.
*/
public KDTree(List<V> vecs, DistanceMetric distanceMetric, PivotSelection pvSelection)
{
this(vecs, distanceMetric, pvSelection, false);
}
/**
* Creates a new KDTree with the given data and methods. <br>
*
* @param vecs the list of vectors to place in this structure
* @param distanceMetric the metric to use for the space
*/
public KDTree(List<V> vecs, DistanceMetric distanceMetric)
{
this(vecs, distanceMetric, PivotSelection.SPREAD_MEDOID);
}
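
    /*
     * A minimal usage sketch (assumed, not from the original source); data and
     * query are hypothetical.
     *
     * List<Vec> data = ...;
     * KDTree<Vec> tree = new KDTree<>(data, new EuclideanDistance());
     * IntList neighbors = new IntList();
     * DoubleList distances = new DoubleList();
     * tree.search(query, 5, neighbors, distances); // 5 nearest neighbors of query
     */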
private KDTree(DistanceMetric distanceMetric, PivotSelection pvSelection)
{
setDistanceMetric(distanceMetric);
this.pvSelection = pvSelection;
}
public KDTree(PivotSelection pivotSelection)
{
this(new EuclideanDistance(), pivotSelection);
}
public KDTree()
{
this(PivotSelection.SPREAD_MEDOID);
}
@Override
public List<Double> getAccelerationCache()
{
return distCache;
}
/**
* Sets the number of points stored within a leaf node of the index. Larger
* values avoid search overhead, but reduce opportunities for pruning.
*
* @param leaf_size the size of a leaf node. Must be at least 2
*/
public void setLeafSize(int leaf_size)
{
if (leaf_size < 2)
throw new IllegalArgumentException("The leaf size must be >= 2 to support all splitting methods");
this.leaf_node_size = leaf_size;
}
/**
*
* @return the number of points to store within a leaf node
*/
public int getLeafSize()
{
return leaf_node_size;
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
if(!( dm instanceof EuclideanDistance || dm instanceof ChebyshevDistance ||
dm instanceof ManhattanDistance || dm instanceof MinkowskiDistance) )
throw new ArithmeticException("KD Trees are not compatible with the given distance metric.");
this.distanceMetric = dm;
}
@Override
public DistanceMetric getDistanceMetric()
{
return distanceMetric;
}
@Override
public void build(boolean parallel, List<V> vecs, DistanceMetric dm)
{
setDistanceMetric(dm);
this.size = vecs.size();
allVecs = vecs = new ArrayList<>(vecs);//copy to avoid altering the input set
distCache = distanceMetric.getAccelerationCache(vecs, parallel);
List<Integer> vecIndices = new IntList(size);
ListUtils.addRange(vecIndices, 0, size, 1);
if(!parallel)
this.root = buildTree(vecIndices, 0, null, null);
else
{
ModifiableCountDownLatch mcdl = new ModifiableCountDownLatch(1);
this.root = buildTree(vecIndices, 0, ParallelUtils.CACHED_THREAD_POOL, mcdl);
try
{
mcdl.await();
}
catch (InterruptedException ex)
{
//Failure, fall back to single threaded version
this.root = buildTree(vecIndices, 0, null, null);
}
}
}
@Override
public void insert(V x)
{
if(allVecs == null)//init
{
allVecs = new ArrayList<>();
distCache = distanceMetric.getAccelerationCache(allVecs);
this.size = 0;
this.root = new KDLeaf(0, new IntList());
}
int indx = size++;
allVecs.add(x);
if(distCache != null)
distCache.addAll(distanceMetric.getQueryInfo(x));
if(root.insert(indx))
root = buildTree(IntList.range(size), 0, null, null);
}
private class KDNode implements Cloneable, Serializable
{
protected int axis;
/**
* The splitting value along the axis
*/
protected double pivot_s;
protected KDNode left;
protected KDNode right;
public KDNode(int axis)
{
this.axis = axis;
}
public KDNode(KDNode toCopy)
{
this(toCopy.axis);
this.pivot_s = toCopy.pivot_s;
if(toCopy.left != null)
this.left = toCopy.left.clone();
            if(toCopy.right != null)
                this.right = toCopy.right.clone();
}
@SuppressWarnings("unused")
public void setAxis(int axis)
{
this.axis = axis;
}
public void setLeft(KDNode left)
{
this.left = left;
}
public void setRight(KDNode right)
{
this.right = right;
}
@SuppressWarnings("unused")
public int getAxis()
{
return axis;
}
@SuppressWarnings("unused")
public KDNode getLeft()
{
return left;
}
@SuppressWarnings("unused")
public KDNode getRight()
{
return right;
}
@Override
protected KDNode clone()
{
return new KDNode(this);
}
protected void searchK(int k, BoundedSortedList<IndexDistPair> knn, Vec target, List<Double> qi)
{
double target_s = target.get(axis);
boolean target_in_left = target_s <= pivot_s;
KDNode nearKD, farKD;
if(target_in_left)
{
nearKD = left;
farKD = right;
}
else
{
nearKD = right;
farKD = left;
}
nearKD.searchK(k, knn, target, qi);
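            //prune: the far side can only contain a closer neighbor if the current
            //k-th best distance reaches across the splitting plane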
double maxDistSoFar = Double.MAX_VALUE;
if(knn.size() >= k)
maxDistSoFar = knn.get(k-1).getDist();
if(maxDistSoFar > Math.abs(target_s-pivot_s))
farKD.searchK(k, knn, target, qi);
}
        protected void searchR(double radius, List<Integer> vecsInRange, List<Double> distVecsInRange, Vec target, List<Double> qi)
{
double target_s = target.get(axis);
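            //descend into each half-space only if the query ball of the given radius overlaps it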
if(radius > target_s-pivot_s)
                left.searchR(radius, vecsInRange, distVecsInRange, target, qi);
if(radius > pivot_s-target_s)
                right.searchR(radius, vecsInRange, distVecsInRange, target, qi);
}
        /**
         * Inserts the given point into this subtree.
         * @param x_indx the index of the point to insert
         * @return {@code true} if this node should be replaced using its children after insertion
         */
protected boolean insert(int x_indx)
{
double target_s = get(x_indx).get(axis);
boolean target_in_left = target_s <= pivot_s;
if (target_in_left)
{
if (left.insert(x_indx))
left = buildTree(((KDLeaf) left).owned, axis + 1, null, null);
}
else
{
if (right.insert(x_indx))
right = buildTree(((KDLeaf) right).owned, axis + 1, null, null);
}
return false;
}
}
private class KDLeaf extends KDNode
{
protected IntList owned;
public KDLeaf(int axis, List<Integer> toOwn)
{
super(axis);
this.owned = new IntList(toOwn);
}
public KDLeaf(KDLeaf toCopy)
{
super(toCopy);
this.owned = new IntList(toCopy.owned);
}
@Override
protected void searchK(int k, BoundedSortedList<IndexDistPair> knn, Vec target, List<Double> qi)
{
for(int i : owned)
{
double dist = distanceMetric.dist(i, target, qi, allVecs, distCache);
knn.add(new IndexDistPair(i, dist));
}
}
@Override
        protected void searchR(double radius, List<Integer> vecsInRange, List<Double> distVecsInRange, Vec target, List<Double> qi)
{
for(int i : owned)
{
double dist = distanceMetric.dist(i, target, qi, allVecs, distCache);
if(dist <= radius)
{
                    vecsInRange.add(i);
distVecsInRange.add(dist);
}
}
}
@Override
protected boolean insert(int x_indx)
{
this.owned.add(x_indx);
return owned.size() >= leaf_node_size*2;
}
@Override
protected KDLeaf clone()
{
return new KDLeaf(this);
}
}
private class VecIndexComparator implements Comparator<Integer>
{
private final int index;
public VecIndexComparator(int index)
{
this.index = index;
}
@Override
public int compare(Integer o1, Integer o2)
{
return Double.compare( allVecs.get(o1).get(index), allVecs.get(o2).get(index));
}
}
/**
*
* @param data subset of data to work on
* @param depth recursion depth
* @param threadpool threadpool source. Null is accepted, and means it will be done immediately
* @param mcdl used to wait on for the original caller, only needed when threadpool is non null
* @return the root tree node for the given set of data
*/
private KDNode buildTree(final List<Integer> data, final int depth, final ExecutorService threadpool, final ModifiableCountDownLatch mcdl)
{
if(data == null || data.isEmpty())
{
if(threadpool != null)//Threadpool null checks since no thread pool means do single threaded
mcdl.countDown();
return null;
}
int mod = allVecs.get(0).length();
if(data.size() <= leaf_node_size)
{
if(threadpool != null)
mcdl.countDown();
return new KDLeaf(depth % mod, data);
}
final boolean isSparse = get(data.get(0)).isSparse();
int pivot = -1;
//Some pivot methods will select the value they want, and so overwrite NaN. Otherwise, use NaN to flag that a median search is needed
double pivot_val = Double.NaN;
switch (pvSelection)
{
case VARIANCE:
OnLineStatistics[] allStats = new OnLineStatistics[mod];
for (int j = 0; j < allStats.length; j++)
allStats[j] = new OnLineStatistics();
for (int i : data)//For each data point
{
V vec = get(i);
for (int j = 0; j < allStats.length; j++)//For each dimension
allStats[j].add(vec.get(j));
}
double maxVariance = -1;
for (int j = 0; j < allStats.length; j++)
{
if (allStats[j].getVarance() > maxVariance)
{
maxVariance = allStats[j].getVarance();
pivot = j;
}
}
if (pivot < 0)//All dims had NaN as variance? Fall back to incremental selection
pivot = depth % mod;
break;
case SPREAD_MEDOID:
//Find the spread of each dimension
double[] mins = new double[mod];
double[] maxs = new double[mod];
Arrays.fill(mins, Double.POSITIVE_INFINITY);
Arrays.fill(maxs, Double.NEGATIVE_INFINITY);
//If sparse, keep a set of indexes we HAVE NOT SEEN
            //these have implicit zeros we need to add back at the end
final Set<Integer> neverSeen = isSparse ? new IntSet(ListUtils.range(0, get(0).length())) : Collections.EMPTY_SET;
for(int i : data)
{
V v = get(i);
for(IndexValue iv : v)
{
int d = iv.getIndex();
double val = iv.getValue();
mins[d] = Math.min(mins[d], val);
maxs[d] = Math.max(maxs[d], val);
neverSeen.remove(d);
}
}
//find the dimension of maximum spread
int maxSpreadDim = 0;
double maxSpreadVal = 0;
for(int d = 0; d < mod; d++)
{
if(neverSeen != null && neverSeen.contains(d))
{
maxs[d] = Math.max(maxs[d], 0);
mins[d] = Math.min(mins[d], 0);
}
double v = maxs[d]-mins[d];
if(v > maxSpreadVal)
{
maxSpreadDim = d;
maxSpreadVal = v;
}
}
pivot = maxSpreadDim;
                //find the value closest to the midpoint of the spread
double midPoint = (maxs[maxSpreadDim]-mins[maxSpreadDim])/2 + mins[maxSpreadDim];
double closestVal = maxs[maxSpreadDim];
for (int i = 0; i < data.size(); i++)
{
                    V v = get(data.get(i));
double val = v.get(maxSpreadDim);
if (Math.abs(midPoint - val) < Math.abs(midPoint - closestVal))
closestVal = val;
}
pivot_val = closestVal;
break;
default:
case INCREMENTAL:
pivot = depth % mod;
break;
}
final KDNode node = new KDNode(pivot);
//split index is the point in the array data that splits it into the left and right child branches
int splitIndex = -1;
//Looks like we have a pivot value? lets check it!
if(!Double.isNaN(pivot_val))
{
//lets go through and push the data around the pivot value
int front = 0;
for(int i = 0; i < data.size(); i++)
if(get(data.get(i)).get(pivot) <= pivot_val)
ListUtils.swap(data, front++, i);
//How deep would we go if the tree was balanced?
int balanced_depth = FastMath.floor_log2(allVecs.size());
if(balanced_depth*3/2 < depth
&& (front < leaf_node_size/3 || data.size()-front < leaf_node_size/3)
                    || balanced_depth*3 < depth)//too lopsided, fall back to median splitting!
pivot_val = Double.NaN;
else
{
splitIndex = front-1;
node.pivot_s = pivot_val;
}
}
if(splitIndex <= 0 || splitIndex >= data.size()-1)//Split turned bad
pivot_val = Double.NaN;//Set to NaN so that we fall back to median-based split selection
//INTENTIONALLY NOT AN ELSE IF
//pivot_val might be set to NaN if pivot looked bad
if(Double.isNaN(pivot_val))
{
Collections.sort(data, new VecIndexComparator(pivot));
splitIndex = getMedianIndex(data, pivot);
if(splitIndex == data.size()-1)//Everyone has the same value? OK, leaf node then
return new KDLeaf(depth % mod, data);
node.pivot_s = pivot_val = get(data.get(splitIndex)).get(pivot);
}
//We could save code lines by making only one path threadpool dependent.
//But this order has better locality for single threaded, while the
        //reverse call order works better for multi-core
if(threadpool == null)
{
node.setLeft(buildTree(data.subList(0, splitIndex+1), depth+1, threadpool, mcdl));
node.setRight(buildTree(data.subList(splitIndex+1, data.size()), depth+1, threadpool, mcdl));
}
else//multi threaded
{
mcdl.countUp();
IntList data_l = new IntList(data.subList(0, splitIndex+1));
IntList data_r = new IntList(data.subList(splitIndex+1, data.size()));
//Right side first, it will start running on a different core
threadpool.submit(() ->
{
node.setRight(buildTree(data_r, depth+1, threadpool, mcdl));
});
//now do the left here,
node.setLeft(buildTree(data_l, depth+1, threadpool, mcdl));
}
return node;
}
    /**
     * Returns the index for the median, adjusted in case multiple points have the same value.
     * @param data the dataset to get the median index of
     * @param pivot the dimension to pivot on, and ensure the median index has a different value on the left side
     * @return the adjusted median index
     */
public int getMedianIndex(final List<Integer> data, int pivot)
{
int medianIndex = data.size()/2;
        //More than one point may have the same value; keep incrementing until that doesn't happen
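        //e.g., sorted pivot values [1, 3, 3, 3, 9]: the naive median index 2 advances
        //to 3, so all points equal to the split value end up in the left branch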
while(medianIndex < data.size()-1 && allVecs.get(data.get(medianIndex)).get(pivot) == allVecs.get(data.get(medianIndex+1)).get(pivot))
medianIndex++;
return medianIndex;
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
if (numNeighbors < 1)
throw new RuntimeException("Invalid number of neighbors to search for");
BoundedSortedList<IndexDistPair> knns = new BoundedSortedList<>(numNeighbors);
root.searchK(numNeighbors, knns, query, distanceMetric.getQueryInfo(query));
neighbors.clear();
distances.clear();
for (int i = 0; i < knns.size(); i++)
{
IndexDistPair pm = knns.get(i);
neighbors.add(pm.getIndex());
distances.add(pm.getDist());
}
}
@Override
public int size()
{
return size;
}
@Override
public V get(int indx)
{
return allVecs.get(indx);
}
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
if (range <= 0)
throw new RuntimeException("Range must be a positive number");
neighbors.clear();
distances.clear();
List<Double> qi = distanceMetric.getQueryInfo(query);
root.searchR(range, neighbors, distances, query, qi);
IndexTable it = new IndexTable(distances);
it.apply(neighbors);
it.apply(distances);
}
@Override
public KDTree<V> clone()
{
KDTree<V> clone = new KDTree<>(distanceMetric, pvSelection);
if(this.distCache != null)
clone.distCache = new DoubleList(this.distCache);
if(this.allVecs != null)
clone.allVecs = new ArrayList<>(this.allVecs);
clone.size = this.size;
if(this.root != null)
clone.root = this.root.clone();
return clone;
}
}
| 23,374 | 32.827786 | 142 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/RTree.java |
package jsat.linear.vectorcollection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.Stack;
import java.util.stream.Collectors;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.utils.BoundedSortedList;
import jsat.utils.ProbailityMatch;
import static jsat.linear.VecPaired.*;
import jsat.utils.IndexTable;
import jsat.utils.IntList;
/**
*
* @author Edward Raff
* @param <V>
*/
public class RTree<V extends Vec> implements IncrementalCollection<V>
{
private static final long serialVersionUID = -7067110612346062800L;
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
Rectangle searchSpace = new Rectangle(dim, range, query);
neighbors.clear();
distances.clear();
search(searchSpace, root, neighbors, distances);
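        //The rectangle search over-approximates the metric ball, so every candidate
        //below is verified against the true distance metric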
Iterator<Integer> nIter = neighbors.iterator();
ListIterator<Double> dIter = distances.listIterator();
assert neighbors.size() == distances.size();
while(nIter.hasNext() )
{
int indx = nIter.next();
double dist = dIter.next();
if( (dist = dm.dist(query, extractTrueVec(get(indx)))) <= range)
dIter.set(dist);
else//false match, remove it
{
nIter.remove();
dIter.remove();
}
}
IndexTable it = new IndexTable(distances);
it.apply(distances);
it.apply(neighbors);
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
/**
* Match up nodes with the minDist from the query to that node
*/
Stack<ProbailityMatch<RNode<V>>> stack = new Stack<>();
BoundedSortedList<ProbailityMatch<Integer>> curBest = new BoundedSortedList<>(numNeighbors);
        curBest.add( new ProbailityMatch<>(Double.MAX_VALUE, -1));//add a sentinel entry to simplify the bound checks
stack.push(new ProbailityMatch<>(minDist(query, root.bound), root));
/**
* Active Branch list
*/
List<ProbailityMatch<RNode<V>>> ABL = new ArrayList<>();
while(!stack.isEmpty())
{
            ProbailityMatch<RNode<V>> popped = stack.pop();
            RNode<V> N = popped.getMatch();
            double minDistN = popped.getProbability();
if(minDistN <= curBest.last().getProbability())
{
if(N.isLeaf())
{
for(int indx : N.points)
{
double dist = dm.dist(query, extractTrueVec(get(indx)));
curBest.add(new ProbailityMatch<>(dist, indx));
}
}
else
{
for(int i = 0; i < N.size(); i++)
{
double i_min = minDist(query, N.getChild(i).bound);
if(i_min <= curBest.last().getProbability())
ABL.add(new ProbailityMatch<>(i_min, N.getChild(i)));
}
Collections.sort(ABL, Collections.reverseOrder());
stack.addAll(ABL);
ABL.clear();
}
}
}
//Now prepare to return
neighbors.clear();
distances.clear();
for(int i = 0; i < curBest.size(); i++)
{
ProbailityMatch<Integer> pm = curBest.get(i);
neighbors.add(pm.getMatch());
distances.add(pm.getProbability());
}
}
    /**
     * Finds all points contained in the given rectangle
     * @param query the rectangle to find all points that would be contained by it
     * @param node the current node to search
     * @param neighbors the place to store the matching point indices
     * @param distances the place to store distances; filled with NaN placeholders to be computed later
     */
private void search(Rectangle query, RNode<V> node, List<Integer> neighbors, List<Double> distances)
{
if(!node.isLeaf())
{
for(int i = 0; i < node.size(); i++)
if(node.getChild(i).bound.intersects(query))
search(query, node.getChild(i), neighbors, distances);
}
else
for(int i = 0; i < node.size(); i++)
if(query.contains(get(node.points.get(i))))
{
neighbors.add(node.points.get(i));
distances.add(Double.NaN);
}
}
@Override
public int size()
{
return size;
}
@Override
public RTree<V> clone()
{
return new RTree<>(this);
}
private RNode cloneChangeContext(RNode toClone)
{
if (toClone != null)
if (toClone instanceof jsat.linear.vectorcollection.RTree.RNode)
return new RNode((RNode) toClone);
return null;
}
@Override
public List<Double> getAccelerationCache()
{
return null;
}
private class RNode<V extends Vec> implements Comparable<RNode<V>>, Cloneable
{
List<RNode<V>> children;
RNode<V> parent;
IntList points;
Rectangle bound;
        /**
         * Creates a new leaf node
         * @param points the indices of the points this leaf will own
         */
public RNode(List<Integer> points)
{
this.points = new IntList(points);
children = new ArrayList<>();
bound = Rectangle.contains(points.stream().map(i->get(i)).collect(Collectors.toList()));
}
public RNode(RNode<V> toCopy)
{
this();
for(RNode<V> child : toCopy.children)
{
RNode<V> cloneChild = cloneChangeContext(child);
cloneChild.parent = this;
this.children.add(cloneChild);
}
if (toCopy.points != null)
for (int v : toCopy.points)
this.points.add(v);
if(toCopy.bound != null)
this.bound = toCopy.bound.clone();
}
public RNode()
{
points = new IntList();
children = new ArrayList<>();
bound = null;
}
RNode<V> getChild(int n)
{
return children.get(n);
}
Rectangle nthBound(int n)
{
if(isLeaf())
return new Rectangle(get(points.get(n)));
else
return children.get(n).bound;
}
@SuppressWarnings("unused")
boolean isFull()
{
return points.size() >= M;
}
/**
*
* @param indx point to add
* @return true if this node needs to be split
*/
boolean add(int indx)
{
points.add(indx);
if(bound == null)
bound = new Rectangle(get(indx));
else
bound.adjustToContain(get(indx));
return size() > M;
}
/**
*
* @param node
* @return true if this node needs to be split
*/
boolean add(RNode<V> node)
{
node.parent = this;
children.add(node);
if(bound == null)
bound = new Rectangle(node.bound);
else
bound.adjustToContain(node.bound);
return size() > M;
}
boolean isLeaf()
{
return children.isEmpty();
}
@Override
public int compareTo(RNode<V> o)
{
return Double.compare(this.bound.area(), o.bound.area());
}
/**
* If this node is a leaf, it returns the number of vectors
* contained by it. Otherwise, it returns the number of
* children nodes this node contains
*
         * @return the number of elements contained by this node
*/
private int size()
{
if(isLeaf())
return points.size();
else
return children.size();
}
@Override
protected RNode<V> clone()
{
return new RNode<>(this);
}
}
static private class Rectangle implements Cloneable
{
/**
* The maximum values for the rectangle
*/
private Vec uB;
/**
* The minimum values for the rectangle
*/
private Vec lB;
public Rectangle(Vec upperBound, Vec lowerBound)
{
this.uB = upperBound;
this.lB = lowerBound;
}
public Rectangle(int dimensions, double distance, Vec center)
{
uB = new DenseVector(dimensions);
lB = new DenseVector(dimensions);
for(int i = 0; i < dimensions; i++)
{
uB.set(i, center.get(i)+distance);
lB.set(i, center.get(i)-distance);
}
}
@SuppressWarnings("unused")
public Rectangle(int dimensions)
{
uB = new DenseVector(dimensions);
lB = new DenseVector(dimensions);
}
public Rectangle(Vec point)
{
uB = point.clone();
lB = point.clone();
}
@SuppressWarnings("unused")
public Rectangle(Vec... points)
{
this(Arrays.asList(points));
}
/**
* Creates a rectangle that covers all the given points tightly
* @param points
*/
public Rectangle(List<Vec> points)
{
uB = new DenseVector(points.get(0).length());
lB = new DenseVector(uB.length());
for(int i = 0; i < uB.length(); i++)
{
                double max = Double.NEGATIVE_INFINITY, min = Double.POSITIVE_INFINITY;
for(int j = 0; j < points.size(); j++)
{
max = Math.max(max, points.get(j).get(i));
min = Math.min(min, points.get(j).get(i));
}
uB.set(i, max);
lB.set(i, min);
}
}
/**
* Creates a new rectangle that contains all the given rectangles
* @param recs
*/
public Rectangle(Rectangle... recs)
{
uB = new DenseVector(recs[0].uB.length());
lB = new DenseVector(uB.length());
for(int i = 0; i < uB.length(); i++)
{
                double max = Double.NEGATIVE_INFINITY, min = Double.POSITIVE_INFINITY;
for(int j = 0; j < recs.length; j++)
{
max = Math.max(max, recs[j].uB.get(i));
min = Math.min(min, recs[j].lB.get(i));
}
uB.set(i, max);
lB.set(i, min);
}
}
double increasedArea(Vec v)
{
double newArea = 1;
double curArea = 1;
for(int i = 0; i < uB.length(); i++)
{
double curAreaTerm = uB.get(i)-lB.get(i);
double vi = v.get(i);
if(vi < lB.get(i))
newArea *= uB.get(i)-vi;
else if(vi > uB.get(i))
newArea *= vi-lB.get(i);
else
newArea *= curAreaTerm;
curArea *= curAreaTerm;
}
return newArea-curArea;
}
double increasedArea(Rectangle r)
{
double newArea = 1;
double curArea = 1;
for(int i = 0; i < uB.length(); i++)
{
double curAreaTerm = uB.get(i)-lB.get(i);
curArea *= curAreaTerm;
double newUBi = Math.max(uB.get(i), r.uB.get(i));
double newLBi = Math.min(lB.get(i), r.lB.get(i));
newArea *= (newUBi-newLBi);
}
return newArea-curArea;
}
double area()
{
double area = 1;
for(int i = 0; i < uB.length(); i++)
area *= uB.get(i)-lB.get(i);
return area;
}
boolean intersects(Rectangle rect)
{
for(int i = 0; i < uB.length(); i++)
{
if(this.uB.get(i) < rect.lB.get(i) || this.lB.get(i) > rect.uB.get(i))
return false;
}
return true;
}
boolean contains(Vec point)
{
for(int i = 0; i < uB.length(); i++)
if(this.uB.get(i) < point.get(i) || this.lB.get(i) > point.get(i))
return false;
return true;
}
void adjustToContain(Vec point)
{
for(int i = 0; i < uB.length(); i++)
{
double vi = point.get(i);
if(vi > uB.get(i))
uB.set(i, vi);
else if(vi < lB.get(i))
lB.set(i, vi);
}
}
void adjustToContain(Rectangle r)
{
adjustToContain(r.uB);
adjustToContain(r.lB);
}
@Override
public String toString()
{
StringBuilder sb = new StringBuilder();
sb.append("[");
sb.append(lB.get(0)).append(":").append(uB.get(0));
for(int i = 1; i < uB.length(); i++)
sb.append(",").append(lB.get(i)).append(":").append(uB.get(i));
sb.append("]");
return sb.toString();
}
@Override
protected Rectangle clone()
{
return new Rectangle(uB.clone(), lB.clone());
}
static <V extends Vec> Rectangle contains(List<V> points)
{
DenseVector uB = new DenseVector(points.get(0).length());
DenseVector lB = new DenseVector(uB.length());
for(int i = 0; i < uB.length(); i++)
{
                double max = Double.NEGATIVE_INFINITY, min = Double.POSITIVE_INFINITY;
for(int j = 0; j < points.size(); j++)
{
max = Math.max(max, points.get(j).get(i));
min = Math.min(min, points.get(j).get(i));
}
uB.set(i, max);
lB.set(i, min);
}
return new Rectangle(uB, lB);
}
}
private int size;
private RNode root;
/**
* Maximum number of entries per node
*/
private int M;
/**
* Minimum number of entries per node
*/
private int m;
/**
* The dimension of vectors stored
*/
private int dim;
/**
* Scratch space for distance calculations
*/
private DenseVector dcScratch;
private DistanceMetric dm;
private List<V> allVecs;
public RTree()
{
this(new EuclideanDistance());
}
public RTree(DistanceMetric dm)
{
this(dm, 5);
}
public RTree(DistanceMetric dm, int max)
{
this(dm, max, (int)(max*0.4));
}
public RTree(DistanceMetric dm, int max, int min)
{
this.root = new RNode();
if(max < 2)
throw new RuntimeException("The maximum number of elements per node must be at least 2");
else if(min > max/2 || min < 1)
throw new RuntimeException("Invalid minumum, min must be in the range[1, " + max/2 + "]");
this.M = max;
this.m = min;
setDistanceMetric(dm);
this.allVecs = new ArrayList<>();
}
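
    /*
     * A minimal usage sketch (assumed, not from the original source); data and q
     * are hypothetical.
     *
     * RTree<Vec> rt = new RTree<>(new EuclideanDistance(), 8, 3);
     * for (Vec v : data)
     *     rt.insert(v);
     * IntList neighbors = new IntList();
     * DoubleList distances = new DoubleList();
     * rt.search(q, 2.5, neighbors, distances); // all points within radius 2.5 of q
     */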
/**
* Copy constructor
* @param toCopy
*/
public RTree(RTree<V> toCopy)
{
this(toCopy.dm.clone(), toCopy.M, toCopy.m);
this.size = toCopy.size;
this.dim = toCopy.dim;
if(toCopy.root != null)
this.root = cloneChangeContext(toCopy.root);
for(V v : toCopy.allVecs)
this.allVecs.add(v);
if(toCopy.dcScratch != null)
this.dcScratch = toCopy.dcScratch.clone();
}
@Override
public void build(boolean parallel, List<V> collection, DistanceMetric dm)
{
setDistanceMetric(dm);
for(V v : collection)
insert(v);
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
@Override
public DistanceMetric getDistanceMetric()
{
return dm;
}
private RNode<V> chooseLeaf(Vec v)
{
        /*
         * CL1 [Initialize] Set N to be the root node
         */
RNode<V> N = root;
        /*
         * CL2 [Leaf check] If N is a leaf, return N.
         */
while(!N.isLeaf())
{
            /*
             * CL3 [Choose subtree] If N is not a leaf,
             * (1) let F be the entry in N whose rectangle
             * F.I needs least enlargement to
             * include E.I.
             *
             * (2)
             * Resolve ties by choosing
             * the entry with the rectangle of smallest
             * area
             */
double leastEnlargment = N.children.get(0).bound.increasedArea(v);
int ind = 0;
for(int i = 1; i < N.children.size(); i++)
{
//Part (1) of CL3
double nb = N.children.get(i).bound.increasedArea(v);
if(nb < leastEnlargment)//Found a better one
{
leastEnlargment = nb;
ind = i;
}
else if(nb == leastEnlargment)//Most likely when 2 or more rectangles intersect this new point
{//Part (2) of CL3
                    //Only pick the new one if it has a smaller area
if(N.children.get(i).bound.area() < N.children.get(ind).bound.area())
{
leastEnlargment = nb;
ind = i;
}
}
}
            /*
             * CL4 [Descend until a leaf is reached.] Set
             * N to be the child node pointed to by
             * F.p and repeat from CL2
             */
N = N.children.get(ind);
}
return N;
}
private RNode<V> splitNode(RNode<V> toSplit)
{
//Quadratic Split
        /*
         * [Pick first entry for each group]
         * Apply Algorithm PickSeeds to choose
         * two entries to be the first elements
         * of the groups. Assign each to a
         * group
         */
        double d = Double.NEGATIVE_INFINITY;
int e1 = 0, e2 = 0;
//PickSeeds
        /**
         * PS1 [Calculate inefficiency of grouping
         * entries together] For each pair of
         * entries E1 and E2, compose a rectangle
         * J including E1.I and E2.I. Calculate
         * d = area(J) - area(E1.I) - area(E2.I)
         */
for(int i = 0; i < toSplit.size(); i++)
for(int j = 0; j < toSplit.size(); j++)
{
if(j == i)
continue;
Rectangle E1Bound = toSplit.nthBound(i);
Rectangle E2Bound = toSplit.nthBound(j);
Rectangle J = new Rectangle(E1Bound, E2Bound);
double dCandidate = J.area() - E1Bound.area() - E2Bound.area();
                if(dCandidate > d)//PS2 [Choose the most wasteful pair] Choose the pair with the largest d
{
e1 = i;
e2 = j;
d = dCandidate;
}
}
{//Make sure that e1 < e2, makes removing easier
int maxE = Math.max(e1, e2);
e1 = Math.min(e1, e2);
e2 = maxE;
}
if(toSplit.isLeaf())
{
IntList group1 = new IntList(m+1);
IntList group2 = new IntList(m+1);
IntList toAsign = toSplit.points;//toSplit.points will get overwritten
group2.add(toAsign.remove(e2));
group1.add(toAsign.remove(e1));
Rectangle rec2 = new Rectangle(get(group2.get(0)));
Rectangle rec1 = new Rectangle(get(group1.get(0)));
while(!toAsign.isEmpty())
{
                /*
                 * QS2 [Check if done] If all entries have been assigned, stop. If one group has
                 * so few entries that all the rest must be assigned to it in order for it to
                 * have the minimum number m, assign them and stop
                 */
if(group1.size() >=m && group2.size() < m && toAsign.size() - group2.size() == 0)
{
group2.addAll(toAsign);
toAsign.clear();
continue;
}
else if(group2.size() >=m && group1.size() < m && toAsign.size() - group1.size() == 0)
{
group1.addAll(toAsign);
toAsign.clear();
continue;
}
                /*
                 * QS3 [Select entry to assign] Invoke Algorithm PickNext to choose the next
                 * entry to assign. Add it to the group whose covering rectangle will have to
                 * be enlarged least to accommodate it. Resolve ties by adding the entry to
                 * the group with the smaller area, then to the one with fewer entries, then to
                 * either. Repeat from QS2
                 */
//PICK NEXT
                /*
                 * [Determine cost of putting each entry in each group] For each entry
                 * E not yet in a group, calculate d1 = the area increase required in the
                 * covering rectangle of Group 1 to include E.I. Calculate d2 similarly
                 * for Group 2
                 */
double minEnlargment = Double.MAX_VALUE;
int index = -1;//the index we are picking next
boolean toG1 = false;//whether it should be placed into group 1 or group 2
for(int i = 0; i < toAsign.size(); i++)
{
double enlarg1 = rec1.increasedArea(get(toAsign.get(i)));
double enlarg2 = rec2.increasedArea(get(toAsign.get(i)));
boolean thisToG1 = enlarg1 < enlarg2;
double enlarg = Math.min(enlarg1, enlarg2);
if(enlarg < minEnlargment)
{
minEnlargment = enlarg;
index = i;
toG1 = thisToG1;
}
}
//Place it
( toG1 ? group1 : group2 ).add(toAsign.remove(index));
}
toSplit.points = group1;
toSplit.bound = Rectangle.contains(toSplit.points.stream().map(i->get(i)).collect(Collectors.toList()));
return new RNode<>(group2);
}
        else//non-leaf case: the same procedure as above, but grouping child rectangles instead of points
{
List<RNode<V>> toAsign = toSplit.children;
toSplit.children = new ArrayList<>();
toSplit.bound = null;
RNode<V> group1 = toSplit;
RNode<V> group2 = new RNode<>();
group2.add(toAsign.remove(e2));
group1.add(toAsign.remove(e1));
Rectangle rec2 = group2.bound;
Rectangle rec1 = group1.bound;
while(!toAsign.isEmpty())
{
                /*
                 * If one group has
                 * so few entries that all the rest must
                 * be assigned to it in order for it to
                 * have the minimum number m,
                 */
if(group1.size() >=m && group2.size() < m && toAsign.size() - group2.size() == 0)
{
for( RNode<V> node : toAsign)
group2.add(node);
toAsign.clear();
continue;
}
else if(group2.size() >=m && group1.size() < m && toAsign.size() - group1.size() == 0)
{
for( RNode<V> node : toAsign)
group1.add(node);
toAsign.clear();
continue;
}
//PICK NEXT find point with the least change in area
double minEnlargment = Double.MAX_VALUE;
int index = -1;//the index we are picking next
boolean toG1 = false;//whether it should be placed into group 1 or group 2
for(int i = 0; i < toAsign.size(); i++)
{
double enlarg1 = rec1.increasedArea(toAsign.get(i).bound);
double enlarg2 = rec2.increasedArea(toAsign.get(i).bound);
boolean thisToG1 = enlarg1 < enlarg2;
double enlarg = Math.min(enlarg1, enlarg2);
if(enlarg < minEnlargment)
{
minEnlargment = enlarg;
index = i;
toG1 = thisToG1;
}
}
//Place it
( toG1 ? group1 : group2 ).add(toAsign.remove(index));
}
return group2;
}
}
private void AdjustTree(RNode<V> L, RNode<V> LL)
{
        /*
         * AT1 [Initialize.] Set N=L. If L was split
         * previously, set NN to be the resulting
         * second node
         */
RNode<V> N = L, NN = LL;
        while(N != root)//AT2 [Check if done] If N is the root, stop
{
            /*
             * AT3 [Adjust covering rectangle in parent
             * entry] Let P be the parent node of
             * N, and let EN be N's entry in P.
             * Adjust EN.I so that it tightly encloses
             * all entry rectangles in N.
             */
RNode<V> P = N.parent;
            P.bound.adjustToContain(N.bound);//P already contains us, so we don't add ourselves again!
if(NN != null)
{
                /*
                 * AT4 [Propagate node split upward] If N has a partner NN resulting from an
                 * earlier split, create a new entry E_NN with E_NN.p pointing to NN and E_NN.I
                 * enclosing all rectangles in NN. Add E_NN to P if there is room. Otherwise,
                 * invoke SplitNode to produce P and PP containing E_NN and all P's old
                 * entries
                 */
if(P.add(NN))
                    NN = splitNode(P); //Assignment is part of step AT5 below
else
NN = null;
}
            /*
             * AT5 [Move up to next level.] Set N=P and
             * set NN=PP if a split occurred.
             * Repeat from AT2.
             */
N = P;
}
//Step I4 [Grow tree taller]
        if(NN != null)//That means we caused the root to split! Need a new root!
{
root = new RNode<>();
root.add(N);
root.add(NN);
}
}
@Override
public V get(int indx)
{
return allVecs.get(indx);
}
@Override
public synchronized void insert(V v)
{
int indx = allVecs.size();
if(indx == 0)
{
this.dim = v.length();
this.dcScratch = new DenseVector(dim);
}
allVecs.add(v);
/*
* I1 [Find position for new record ]
* Invoke ChooseLeaf to select a leaf
* node L in which to place E
*/
RNode<V> L = chooseLeaf(v), LL = null;
        /*
         * I2 [Add record to leaf node] If L has
         * room for another entry, install E.
         * Otherwise invoke SplitNode to obtain
         * L and LL containing E and all the
         * old entries of L
         */
if(L.add(indx))//true if we need to split
LL = splitNode(L);
        /*
         * I3 [Propagate changes upward] Invoke
         * AdjustTree on L, also passing LL if a
         * split was performed
         */
AdjustTree(L, LL);
        //step I4 handled in AdjustTree
size++;
}
    /**
     * The minimum distance from a query point to the given rectangle
     * @param p the query point
     * @param r the rectangle to compute the distance to
     * @return the minimum distance from the point to the rectangle
     */
private double minDist(Vec p, Rectangle r)
{
if(r.contains(p))
return 0;
        //Set up scratch vector
for(int i = 0; i < dim; i++)
{
double pi = p.get(i);
if (pi < r.lB.get(i))
dcScratch.set(i, r.lB.get(i));
else if (pi > r.uB.get(i))
dcScratch.set(i, r.uB.get(i));
else
dcScratch.set(i, pi);
}
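        //dcScratch now holds p clamped to the box, i.e., the rectangle's closest
        //point to p; e.g., r = [0,1]x[0,1] and p = (2, 0.5) gives (1, 0.5)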
return dm.dist(p, dcScratch);
}
/**
* The minimum of the maximum possible distance from the query to the rectangle
* @param p the query point
     * @param r the rectangle to compute the distance to
* @return the minimum of the maximum distance from the point to the rectangle
*/
@SuppressWarnings("unused")
private double minMaxDist(Vec p, Rectangle r)
{
if(r.contains(p))
return 0;
        /*
         * MinMaxDist is usually described with a minimum over another loop, explicitly as the Euclidean distance.
         * Instead, we prepare a single vector for each loop (k), and set its values accordingly (with index k being an exception in the value set).
         * We then compute the distance metric and select the min over each k
         */
double minDist = Double.MAX_VALUE;
for(int k = 0; k < dim; k++)
{
//setUp vector
for(int j = 0; j < dim; j++)
{
double pj = p.get(j);
double sj = r.lB.get(j);
double tj = r.uB.get(j);
if (j == k)//rm_k
if (pj <= (sj + tj) * 0.5)
dcScratch.set(j, sj);
else
dcScratch.set(j, tj);
else
{
if(pj >= (sj+tj)*0.5)
dcScratch.set(j, sj);
else
dcScratch.set(j, tj);
}
}
//Now just compute distance
double dist = dm.dist(p, dcScratch);
minDist = Math.min(dist, minDist);
}
return minDist;
}
/**
* The maximal distance possible between the query point and the edge of the given rectangle farthest from the point.
* @param p the query point
     * @param r the rectangle to compute the distance to
* @return the maximum distance from the point to the rectangle
*/
@SuppressWarnings("unused")
private double maxDist(Vec p, Rectangle r)
{
        //set up scratch vector: per dimension, take the rectangle endpoint farthest from p
        for(int i = 0; i < dim; i++)
        {
            double pi = p.get(i);
            double si = r.lB.get(i);
            double ti = r.uB.get(i);
            if(pi - si > ti - pi)//the lower edge is farther from p
                dcScratch.set(i, si);
            else
                dcScratch.set(i, ti);
        }
return dm.dist(p, dcScratch);
}
}
| 32,558 | 30.336862 | 146 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/RandomBallCover.java | package jsat.linear.vectorcollection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.utils.*;
import static java.lang.Math.*;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.utils.concurrent.ParallelUtils;
/**
* An implementation of the exact search for the Random Ball Cover algorithm.
* Unlike most algorithms, it attempts to satisfy queries in <i>O(sqrt(n))</i>
 * time. It does this in a way that is efficient in its computation and easily
 * parallelizable. Construction time is <i>O(n<sup>3/2</sup>)</i>. <br>
* Unlike the original paper, which assumes single queries will be run in
* parallel, the algorithm has been modified to perform additional pruning and
* to support range queries.
* <br><br>
* See: Cayton, L. (2012). <i>Accelerating Nearest Neighbor Search on Manycore
* Systems</i>. 2012 IEEE 26th International Parallel and Distributed Processing
* Symposium, 402–413. doi:10.1109/IPDPS.2012.45
*
* @author Edward Raff
*/
public class RandomBallCover<V extends Vec> implements IncrementalCollection<V>
{
private static final long serialVersionUID = 2437771973228849200L;
private DistanceMetric dm;
/**
* The indices match with their representatives in R
*/
private List<List<Integer>> ownedVecs;
/**
* The indices match with their representatives in R and in ownedVecs. Each
* value indicates the distance of the point to its owner. They are not in
* any order
*/
private List<DoubleList> ownedRDists;
/**
* The list of representatives
*/
private List<Integer> R;
private int size;
private List<V> allVecs;
private List<Double> distCache;
/**
* Distance from representative i to its farthest neighbor it owns
*/
private double[] repRadius;
/**
* Creates a new Random Ball Cover
* @param vecs the vectors to place into the RBC
* @param dm the distance metric to use
* @param parallel {@code true} if construction should be done in parallel,
* {@code false} for single threaded.
*/
public RandomBallCover(List<V> vecs, DistanceMetric dm, boolean parallel)
{
this.dm = dm;
build(parallel, vecs, dm);
}
/**
* Creates a new Random Ball Cover
* @param vecs the vectors to place into the RBC
* @param dm the distance metric to use
*/
public RandomBallCover(List<V> vecs, DistanceMetric dm)
{
this(vecs, dm, false);
}
public RandomBallCover(DistanceMetric dm)
{
this.dm = dm;
this.size = 0;
this.allVecs = new ArrayList<>();
if(dm.supportsAcceleration())
this.distCache = new DoubleList();
this.R = new IntList();
}
public RandomBallCover()
{
this(new EuclideanDistance());
}
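
    /*
     * A minimal usage sketch (assumed, not from the original source); data and q
     * are hypothetical.
     *
     * RandomBallCover<Vec> rbc = new RandomBallCover<>(data, new EuclideanDistance(), true);
     * IntList neighbors = new IntList();
     * DoubleList distances = new DoubleList();
     * rbc.search(q, 10, neighbors, distances); // exact 10-NN, pruned via the triangle inequality
     */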
/**
* Copy constructor
* @param other the RandomBallCover to create a copy of
*/
private RandomBallCover(RandomBallCover<V> other)
{
this.dm = other.dm.clone();
this.size = other.size;
if(other.allVecs != null)
this.allVecs = new ArrayList<>(other.allVecs);
if(other.distCache != null)
this.distCache = new DoubleList(other.distCache);
if(other.ownedVecs != null)
this.ownedVecs = new ArrayList<>(other.ownedVecs.size());
if(other.ownedRDists != null)
this.ownedRDists = new ArrayList<>(other.ownedRDists.size());
if(other.ownedRDists != null)
for(int i = 0; i < other.ownedRDists.size(); i++)
{
this.ownedRDists.add(new DoubleList(other.ownedRDists.get(i)));
this.ownedVecs.add(new IntList(other.ownedVecs.get(i)));
}
this.R = new IntList(other.R);
if(other.repRadius != null)
this.repRadius = Arrays.copyOf(other.repRadius, other.repRadius.length);
}
@Override
public void build(boolean parallel, List<V> collection, DistanceMetric dm)
{
setDistanceMetric(dm);
this.size = collection.size();
this.allVecs = new ArrayList<>(collection);
this.distCache = dm.getAccelerationCache(allVecs, parallel);
IntList allIndices = new IntList(allVecs.size());
ListUtils.addRange(allIndices, 0, size, 1);
setUp(allIndices, parallel);
}
@Override
public List<Double> getAccelerationCache()
{
return distCache;
}
private void setUp(List<Integer> vecIndices, boolean parallel)
{
int repCount = (int) Math.max(1, Math.sqrt(vecIndices.size()));
Collections.shuffle(vecIndices);
R = new IntList(vecIndices.subList(0, repCount));
repRadius = new double[R.size()];
ownedRDists = new ArrayList<>(repRadius.length);
IntList vecIndicesSub = new IntList(vecIndices.subList(repCount, vecIndices.size()));
ownedVecs = new ArrayList<>(repCount);
for (int i = 0; i < repCount; i++)
{
ownedVecs.add(new IntList(repCount));
ownedRDists.add(new DoubleList(repCount));
}
ParallelUtils.run(parallel, vecIndicesSub.size(), (start, end)->
{
double tmp;
for (int v : vecIndicesSub.subList(start, end))
{
int bestRep = 0;
double bestDist = dm.dist(v, R.get(0), allVecs, distCache);
for (int potentialRep = 1; potentialRep < R.size(); potentialRep++)
if ((tmp = dm.dist(v, R.get(potentialRep), allVecs, distCache)) < bestDist)
{
bestDist = tmp;
bestRep = potentialRep;
}
synchronized (ownedVecs.get(bestRep))
{
ownedVecs.get(bestRep).add(v);
ownedRDists.get(bestRep).add(bestDist);
repRadius[bestRep] = Math.max(repRadius[bestRep], bestDist);
}
}
});
}
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
neighbors.clear();
distances.clear();
List<Double> qi = dm.getQueryInfo(query);
if(repRadius == null)//brute force search b/c small collection
{
for(int i = 0; i < allVecs.size(); i++)
{
double dist = dm.dist(i, query, qi, allVecs, distCache);
if(dist <= range)
{
distances.add(dist);
neighbors.add(i);
}
}
return;
}
        //Compute the distance to every representative, adding any that fall within range
double[] queryRDists = new double[R.size()];
for (int i = 0; i < R.size(); i++)
if ((queryRDists[i] = dm.dist(R.get(i), query, qi, allVecs, distCache)) <= range)
{
neighbors.add(R.get(i));
distances.add(queryRDists[i]);
}
IndexTable sorted = new IndexTable(queryRDists);
//k-nn search through the rest of the data set
for (int i_indx = 0; i_indx < R.size(); i_indx++)
{
int i = sorted.index(i_indx);
            //Prune out representatives that are just too far
if (queryRDists[i] > range + repRadius[i])
continue;
            //Add any new nn immediately, hopefully shrinking the bound before
            //the next representative is tested
double dist;
for (int j = 0; j < ownedVecs.get(i).size(); j++)
{
double rDist = ownedRDists.get(i).getD(j);
                if (queryRDists[i] > range + rDist)//first inequality on a per point basis
continue;
if ((dist = dm.dist(ownedVecs.get(i).get(j), query, qi, allVecs, distCache)) <= range)
{
neighbors.add(ownedVecs.get(i).get(j));
distances.add(dist);
}
}
}
IndexTable it = new IndexTable(distances);
it.apply(neighbors);
it.apply(distances);
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
BoundedSortedList<IndexDistPair> knn = new BoundedSortedList<>(numNeighbors);
neighbors.clear();
distances.clear();
List<Double> qi = dm.getQueryInfo(query);
if(repRadius == null)//brute force search b/c small collection
{
for(int i = 0; i < allVecs.size(); i++)
knn.add(new IndexDistPair(i, dm.dist(i, query, qi, allVecs, distCache)));
}
else
{
//Find the best representative r_q, and add its owned children to knn list.
double[] queryRDists = new double[R.size()];
Arrays.fill(queryRDists, Double.MAX_VALUE);
int bestRep = 0;
for (int i = 0; i < R.size(); i++)
if ((queryRDists[i] = dm.dist(R.get(i), query, qi, allVecs, distCache)) < queryRDists[bestRep])
bestRep = i;
//Other cluster reps R will get a chance to be added to the list later
knn.add(new IndexDistPair(R.get(bestRep), queryRDists[bestRep]));
//need k'th nearest representative R for bounds check
IndexTable it = new IndexTable(queryRDists);
int kth_best_rept;
            if(numNeighbors < R.size())//need the k'th closest, but with fewer than k reps we can't use that bound
                kth_best_rept = it.index(numNeighbors-1);
            else//You are asking for too many neighbors, we can't use the 2nd bound
                kth_best_rept = -1;//if someone uses this we will get an IndexOutOfBoundsException, telling us about the bug!
for (int v : ownedVecs.get(bestRep))
knn.add(new IndexDistPair(v, dm.dist(v, query, qi, allVecs, distCache)));
//k-nn search through the rest of the data set
for (int sorted_order = 1; sorted_order < R.size(); sorted_order++)
{//start at 1 b/c we brute forced the closest rep first
final int i = it.index(sorted_order);
                if(knn.size() == numNeighbors)//no pruning until we reach k-nns
{
//Prune out representatives that are just too far
if (queryRDists[i] > knn.last().getDist() + repRadius[i])
continue;
//check to make sure we can use this bound before attempting
else if (kth_best_rept >= 0 && queryRDists[i] > 3 * queryRDists[kth_best_rept])
continue;
}
                //Add any new nn immediately, hopefully shrinking the bound before
                //the next representative is tested
knn.add(new IndexDistPair(R.get(i), queryRDists[i]));
final List<Integer> L_i_index = ownedVecs.get(i);
final DoubleList L_i_radius = ownedRDists.get(i);
for (int j = 0; j < ownedVecs.get(i).size(); j++)
{
double rDist = L_i_radius.getD(j);
//Check the first inequality on a per point basis
if (knn.size() == numNeighbors && queryRDists[i] > knn.last().getDist() + rDist)
continue;
int indx = L_i_index.get(j);
V v = allVecs.get(indx);
knn.add(new IndexDistPair(indx, dm.dist(indx, query, qi, allVecs, distCache)));
}
}
}
for(IndexDistPair v : knn)
{
neighbors.add(v.getIndex());
distances.add(v.getDist());
}
}
@Override
public void insert(V x)
{
final int new_indx = allVecs.size();
allVecs.add(x);
List<Double> qi = dm.getQueryInfo(x);
if(distCache != null)
distCache.addAll(qi);
size++;
if(size < 10)//brute force for now
{
R.add(new_indx);//use R for brute force
return;
}
else if(repRadius == null)//initial normal build
{
R.add(new_indx);
setUp(new IntList(R), false);
return;
}
//else, normal addition
//Find the best representative
double[] queryRDists = new double[R.size()];
Arrays.fill(queryRDists, Double.MAX_VALUE);
int bestRep = 0;
for (int i = 0; i < R.size(); i++)
if ((queryRDists[i] = dm.dist(R.get(i), x, qi, allVecs, distCache)) < queryRDists[bestRep])
bestRep = i;
//Add new point and update information
ownedVecs.get(bestRep).add(new_indx);
ownedRDists.get(bestRep).add(queryRDists[bestRep]);
repRadius[bestRep] = Math.max(repRadius[bestRep], queryRDists[bestRep]);
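        //R should hold about sqrt(n) representatives, so only expand R when the
        //collection size hits the next perfect square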
if(pow(ceil(sqrt(size)), 2) != size)
return;//we are done
//else, expand R set
int new_r_vec_indx = -1;
{//lets randomly sample a point that isn't a rep
int ran_val = new Random().nextInt(size-R.size()-1);
int R_pos = 0;
while(ran_val >= 0)
{
if(ran_val >= ownedVecs.get(R_pos).size())
ran_val -= ownedVecs.get(R_pos++).size();
else//found the list to grab from
{
new_r_vec_indx = ownedVecs.get(R_pos).remove(ran_val);
ownedRDists.get(R_pos).remove(ran_val);
//update radius
repRadius[R_pos] = 0;
for(double d : ownedRDists.get(R_pos))
repRadius[R_pos] = Math.max(repRadius[R_pos], d);
//stop loop
break;
}
}
}
        //We now have a new rep, we need to find the points it will own
double max_radius = 0;
for(double d : repRadius)
max_radius = Math.max(max_radius, d);
IntList potentialChildIndx = new IntList();
DoubleList potentialChildDist = new DoubleList();
search(get(new_r_vec_indx), max_radius, potentialChildIndx, potentialChildDist);
//add new R to set after to avoid search issues
repRadius = Arrays.copyOf(repRadius, repRadius.length+1);
R.add(new_r_vec_indx);
ownedRDists.add(new DoubleList());
ownedVecs.add(new IntList());
final int r_new = R.size()-1;
        /*
         * The existing structure of the RBC bookkeeping doesn't lend itself to the insertion case.
         * Because the R set expansion is rare, we don't modify that internal structure.
         * Instead, we will create a new temporary structure to store things
         * based on the index of the data point. This gives us easy direct
         * indexing ability. We then fix up the RBC structure at the end.
         */
int[] whoOwnsMe = new int[allVecs.size()];
Arrays.fill(whoOwnsMe, -1);
double[] distToMyOwner = new double[allVecs.size()];
        for (int i = 0; i < R.size() - 1; i++)//technically this is O(n), but it's really fast - so who cares
{
List<Integer> L_ry = ownedVecs.get(i);
for (int j = 0; j < L_ry.size(); j++)
{
whoOwnsMe[L_ry.get(j)] = i;
distToMyOwner[L_ry.get(j)] = ownedRDists.get(i).getD(j);
}
}
boolean[] R_is_dirty = new boolean[R.size()];
Arrays.fill(R_is_dirty, false);
R_is_dirty[r_new] = true;
for(int i = 0; i < potentialChildIndx.size(); i++)
{
double d_y_r_new = potentialChildDist.getD(i);
int y_indx = potentialChildIndx.getI(i);
//find who owns y_indx
int r_y = whoOwnsMe[y_indx];
            if(r_y == -1)//Representative, skip
continue;
double d_y_ry = distToMyOwner[y_indx];
if(d_y_ry > d_y_r_new)//change ownership
{
R_is_dirty[r_y] = true;
whoOwnsMe[y_indx] = r_new;
distToMyOwner[y_indx] = d_y_r_new;
}
}
        //update representative radii
for (int r_indx = 0; r_indx < R.size(); r_indx++)
if(R_is_dirty[r_indx])//clear vecs so we can re-populate
{
repRadius[r_indx] = 0;
ownedRDists.get(r_indx).clear();
ownedVecs.get(r_indx).clear();
}
for(int i = 0; i < whoOwnsMe.length; i++)
{
int r_i = whoOwnsMe[i];
            if(r_i == -1)//Representative, skip
continue;
if(R_is_dirty[r_i])
{
repRadius[r_i] = Math.max(repRadius[r_i], distToMyOwner[i]);
ownedRDists.get(r_i).add(distToMyOwner[i]);
ownedVecs.get(r_i).add(i);
}
}
}
@Override
public int size()
{
return size;
}
@Override
public V get(int indx)
{
return allVecs.get(indx);
}
@Override
public RandomBallCover<V> clone()
{
return new RandomBallCover<>(this);
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
@Override
public DistanceMetric getDistanceMetric()
{
return dm;
}
}
| 17,903 | 35.390244 | 115 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/RandomBallCoverOneShot.java | package jsat.linear.vectorcollection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.utils.BoundedSortedList;
import jsat.utils.DoubleList;
import jsat.utils.FakeExecutor;
import jsat.utils.IndexTable;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
import jsat.utils.concurrent.ParallelUtils;
/**
 * An implementation of the one-shot search for the Random Ball Cover algorithm.
 * Unlike most algorithms, it attempts to satisfy queries in <i>O(sqrt(n))</i>
 * time. It does this in a way that is efficient in its computation and easily
 * parallelizable. Construction time is <i>O(n<sup>3/2</sup>)</i>. <br>
 * The one-shot algorithm is an approximate nearest neighbor search, and returns
 * the correct nearest neighbor with a certain probability. If an incorrect
 * neighbor is found, its distance from the true nearest neighbor is bounded.
* <br>
* The RBC algorithm was not originally developed for range queries. While the
* exact RBC version can perform efficient range queries, the one-shot version
* is more likely to produce different results, potentially missing a large
* portion of the vectors that should have been included.
* <br><br>
* See: Cayton, L. (2012). <i>Accelerating Nearest Neighbor Search on Manycore
* Systems</i>. 2012 IEEE 26th International Parallel and Distributed Processing
* Symposium, 402–413. doi:10.1109/IPDPS.2012.45
*
* @author Edward Raff
*/
public class RandomBallCoverOneShot<V extends Vec> implements VectorCollection<V>
{
private static final long serialVersionUID = -2562499883847452797L;
private DistanceMetric dm;
private List<List<Integer>> ownedVecs;
private List<Integer> R;
private List<V> allVecs;
private List<Double> distCache;
/**
* The number of points each representative will consider
*/
private int s;
/**
* Distance from representative i to its farthest neighbor it owns
*/
double[] repRadius;
/**
* Creates a new one-shot version of the Random Cover Ball.
* @param vecs the vectors to place into the RBC
* @param dm the distance metric to use
* @param s the number of points to be claimed by each representative.
* @param parallel {@code true} if construction should be done in parallel,
* {@code false} for single threaded.
*/
public RandomBallCoverOneShot(List<V> vecs, DistanceMetric dm, int s, boolean parallel)
{
this.s = s;
build(parallel, vecs, dm);
}
/**
* Creates a new one-shot version of the Random Cover Ball.
* @param vecs the vectors to place into the RBC
* @param dm the distance metric to use
* @param parallel {@code true} if construction should be done in parallel,
* {@code false} for single threaded.
*/
public RandomBallCoverOneShot(List<V> vecs, DistanceMetric dm, boolean parallel)
{
this(vecs, dm, (int)Math.sqrt(vecs.size()), parallel);
}
/**
* Creates a new one-shot version of the Random Cover Ball.
* @param vecs the vectors to place into the RBC
* @param dm the distance metric to use
* @param s the number of points to be claimed by each representative.
*/
public RandomBallCoverOneShot(List<V> vecs, DistanceMetric dm, int s)
{
this(vecs, dm, s, false);
}
/**
* Creates a new one-shot version of the Random Cover Ball.
* @param vecs the vectors to place into the RBC
* @param dm the distance metric to use
*/
public RandomBallCoverOneShot(List<V> vecs, DistanceMetric dm)
{
this(vecs, dm, (int)Math.sqrt(vecs.size()));
}
public RandomBallCoverOneShot()
{
this.dm = new EuclideanDistance();
this.s = -1;
}
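
    /*
     * A minimal usage sketch (assumed, not from the original source); data and q
     * are hypothetical.
     *
     * RandomBallCoverOneShot<Vec> rbc = new RandomBallCoverOneShot<>(data, new EuclideanDistance());
     * IntList neighbors = new IntList();
     * DoubleList distances = new DoubleList();
     * rbc.search(q, 1, neighbors, distances); // probably-correct 1-NN, bounded error otherwise
     */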
/**
* Copy constructor
* @param other the RandomBallCover to create a copy of
*/
private RandomBallCoverOneShot(RandomBallCoverOneShot<V> other)
{
this.dm = other.dm.clone();
this.ownedVecs = new ArrayList<>(other.ownedVecs.size());
for(int i = 0; i < other.ownedVecs.size(); i++)
{
this.ownedVecs.add(new IntList(other.ownedVecs.get(i)));
}
this.R = new IntList(other.R);
this.repRadius = Arrays.copyOf(other.repRadius, other.repRadius.length);
this.s = other.s;
if(other.distCache != null)
this.distCache = new DoubleList(other.distCache);
if(other.allVecs != null)
this.allVecs = new ArrayList<>(other.allVecs);
}
@Override
public void build(boolean parallel, List<V> collection, DistanceMetric dm)
{
this.allVecs = new ArrayList<>(collection);
distCache = dm.getAccelerationCache(collection, parallel);
IntList allIndices = new IntList(allVecs.size());
ListUtils.addRange(allIndices, 0, allVecs.size(), 1);
if(s < 0)
s = (int) Math.sqrt(allVecs.size());
setUp(allIndices, parallel);
}
private void setUp(List<Integer> allIndices, boolean parallel)
{
int repCount = (int) Math.max(1, Math.sqrt(allIndices.size()));
Collections.shuffle(allIndices);
R = allIndices.subList(0, repCount);
repRadius = new double[R.size()];
final List<Integer> allRemainingVecs = allIndices.subList(repCount, allIndices.size());
ownedVecs = new ArrayList<>(repCount);
for (int i = 0; i < repCount; i++)
{
ownedVecs.add(new IntList(s));
}
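        //In parallel over the representatives: each representative claims the s points nearest to it among the non-representative vectors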
ParallelUtils.run(parallel, R.size(), (i)->
{
final int Ri = R.get(i);
final List<Integer> ROwned = ownedVecs.get(i);
BoundedSortedList<IndexDistPair> nearest = new BoundedSortedList<>(s);
for(int v : allRemainingVecs)
nearest.add(new IndexDistPair(v, dm.dist(v, Ri, allVecs, distCache)));
for(IndexDistPair pmv : nearest)
ROwned.add(pmv.getIndex());
});
}
@Override
public List<Double> getAccelerationCache()
{
return distCache;
}
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
neighbors.clear();
distances.clear();
List<Double> qi = dm.getQueryInfo(query);
//Find the best representative r_q
double tmp;
double bestDist = Double.POSITIVE_INFINITY;
int bestRep = 0;
for (int i = 0; i < R.size(); i++)
{
if ((tmp = dm.dist(R.get(i), query, qi, allVecs, distCache) ) < bestDist)
{
bestRep = i;
bestDist = tmp;
}
if(tmp <= range)
{
neighbors.add(R.get(i));
distances.add(tmp);
}
}
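        //Only the points owned by the closest representative are scanned, so vectors owned solely by other representatives can be missed - the one-shot search is approximate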
for (int v : ownedVecs.get(bestRep))
if((tmp = dm.dist(v, query, qi, allVecs, distCache) ) <= range)
{
neighbors.add(v);
distances.add(tmp);
}
IndexTable it = new IndexTable(distances);
it.apply(neighbors);
it.apply(distances);
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
neighbors.clear();
distances.clear();
BoundedSortedList<IndexDistPair> knn =
new BoundedSortedList<>(numNeighbors);
List<Double> qi = dm.getQueryInfo(query);
//Find the best representative r_q
double tmp;
double bestDist = Double.POSITIVE_INFINITY;
int bestRep = 0;
for (int i = 0; i < R.size(); i++)
if ((tmp = dm.dist(R.get(i), query, qi, allVecs, distCache) ) < bestDist)
{
bestRep = i;
bestDist = tmp;
}
knn.add(new IndexDistPair(R.get(bestRep), bestDist));
for (int v : ownedVecs.get(bestRep))
knn.add(new IndexDistPair(v, dm.dist(v, query, qi, allVecs, distCache)));
for(IndexDistPair v : knn)
{
neighbors.add(v.getIndex());
distances.add(v.getDist());
}
}
@Override
public int size()
{
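        //the structure stores s owned points for each of the |R| representatives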
return R.size()*s;
}
@Override
public V get(int indx)
{
return allVecs.get(indx);
}
@Override
public RandomBallCoverOneShot<V> clone()
{
return new RandomBallCoverOneShot<>(this);
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
@Override
public DistanceMetric getDistanceMetric()
{
return dm;
}
}
| 8,993 | 30.893617 | 100 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/SVPTree.java | package jsat.linear.vectorcollection;
import java.io.Serializable;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Stack;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.utils.BooleanList;
import jsat.utils.BoundedSortedList;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
import jsat.utils.IntList;
import jsat.utils.ModifiableCountDownLatch;
import jsat.utils.Pair;
import jsat.utils.SimpleList;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
import static java.lang.Math.*;
/**
* Provides a simplified implementation of Vantage Point Trees, as described in
* "Data Structures and Algorithms for Nearest Neighbor Search in General Metric Spaces"
* by Peter N. Yianilos
* <br>
* VPTrees are more expensive to create, requiring O(n log n) distance computations. However,
 * they work well for high-dimensional data sets, and provide O(log n) query time for
* {@link #search(jsat.linear.Vec, int) }
*
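 * <br><br>
 * A minimal sketch of incremental use (hypothetical random data):
 * <pre>{@code
 * SVPTree<Vec> tree = new SVPTree<>(new EuclideanDistance());
 * Random rand = new Random(7);
 * for (int i = 0; i < 500; i++)
 *     tree.insert(new DenseVector(new double[]{rand.nextGaussian(), rand.nextGaussian()}));
 * IntList nn = new IntList();
 * DoubleList dists = new DoubleList();
 * tree.search(tree.get(0), 0.5, nn, dists); //all points within a radius of 0.5, sorted by distance
 * }</pre>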
*
* @author Edward Raff
* @param <V>
*/
public class SVPTree<V extends Vec> implements IncrementalCollection<V>, DualTree<V>
{
private static final long serialVersionUID = -7271540108746353762L;
private DistanceMetric dm;
private List<Double> distCache;
private List<V> allVecs;
protected volatile TreeNode root;
private int size;
private int maxLeafSize = 5;
@Override
public IndexNode getRoot()
{
return root;
}
public SVPTree(List<V> list, DistanceMetric dm, boolean parallel)
{
build(parallel, list, dm);
}
public SVPTree(List<V> list, DistanceMetric dm)
{
this(list, dm, false);
}
public SVPTree()
{
this(new EuclideanDistance());
}
public SVPTree(DistanceMetric dm)
{
this.dm = dm;
if(!dm.isSubadditive())
throw new RuntimeException("VPTree only supports metrics that support the triangle inequality");
this.size = 0;
this.allVecs = new ArrayList<>();
if(dm.supportsAcceleration())
this.distCache = new DoubleList();
}
/**
* Copy constructor
* @param toClone the object to copy
*/
protected SVPTree(SVPTree<V> toClone)
{
this.dm = toClone.dm.clone();
this.root = cloneChangeContext(toClone.root);
this.size = toClone.size;
this.maxLeafSize = toClone.maxLeafSize;
if(toClone.allVecs != null)
this.allVecs = new ArrayList<>(toClone.allVecs);
if(toClone.distCache != null)
this.distCache = new DoubleList(toClone.distCache);
}
@Override
public List<Double> getAccelerationCache()
{
return distCache;
}
@Override
public double dist(int self_index, int other_index, DualTree<V> other)
{
return this.dm.dist(self_index, other.get(other_index), dm.getQueryInfo(other.get(other_index)), allVecs, distCache);
}
@Override
public void build(boolean parallel, List<V> list, DistanceMetric dm)
{
setDistanceMetric(dm);
if(!dm.isSubadditive())
throw new RuntimeException("VPTree only supports metrics that support the triangle inequality");
this.size = list.size();
this.allVecs = list;
distCache = dm.getAccelerationCache(allVecs, parallel);
        //Use simple list so both halves can be modified simultaneously
List<Pair<Double, Integer>> tmpList = new SimpleList<>(list.size());
for(int i = 0; i < allVecs.size(); i++)
tmpList.add(new Pair<>(-1.0, i));
if(!parallel)
this.root = makeVPTree(tmpList);
else
{
ExecutorService threadpool = ParallelUtils.getNewExecutor(parallel);
ModifiableCountDownLatch mcdl = new ModifiableCountDownLatch(1);
this.root = makeVPTree(tmpList, threadpool, mcdl);
mcdl.countDown();
try
{
mcdl.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(SVPTree.class.getName()).log(Level.SEVERE, null, ex);
System.err.println("Falling back to single threaded VPTree constructor");
tmpList.clear();
for(int i = 0; i < list.size(); i++)
tmpList.add(new Pair<>(-1.0, i));
this.root = makeVPTree(tmpList);
}
finally
{
threadpool.shutdownNow();
}
}
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
@Override
public DistanceMetric getDistanceMetric()
{
return dm;
}
private TreeNode cloneChangeContext(TreeNode toClone)
{
if (toClone != null)
if (toClone instanceof jsat.linear.vectorcollection.SVPTree.VPLeaf)
return new VPLeaf((VPLeaf) toClone);
else
return new VPNode((VPNode) toClone);
return null;
}
@Override
public int size()
{
return size;
}
@Override
public V get(int indx)
{
return allVecs.get(indx);
}
@Override
public void insert(V x)
{
int indx = size++;
allVecs.add(x);
if(distCache != null)
distCache.addAll(dm.getQueryInfo(x));
if(root == null)
{
ArrayList<Pair<Double, Integer>> list = new ArrayList<>();
list.add(new Pair<>(Double.MAX_VALUE, indx));
root = new VPLeaf(list);
return;
}
        //else, do a normal insert
root.insert(indx, Double.MAX_VALUE);
if(root instanceof jsat.linear.vectorcollection.SVPTree.VPLeaf)//is root a leaf?
{
VPLeaf leaf = (VPLeaf) root;
if(leaf.points.size() > maxLeafSize*maxLeafSize)//check to expand
{
//hacky, but works
int orig_leaf_isze = maxLeafSize;
maxLeafSize = maxLeafSize*maxLeafSize;//call normal construct with adjusted leaf size to stop expansion
ArrayList<Pair<Double, Integer>> S = new ArrayList<>();
for(int i = 0; i < leaf.points.size(); i++)
S.add(new Pair<>(Double.MAX_VALUE, leaf.points.getI(i)));
root = makeVPTree(S);
                maxLeafSize = orig_leaf_isze;//restore the original max leaf size
}
}
//else, normal non-leaf root insert handles expansion when needed
}
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
List<Double> qi = dm.getQueryInfo(query);
root.searchRange(VecPaired.extractTrueVec(query), range, neighbors, distances, 0.0, qi);
IndexTable it = new IndexTable(distances);
it.apply(neighbors);
it.apply(distances);
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
BoundedSortedList<IndexDistPair> boundedList= new BoundedSortedList<>(numNeighbors, numNeighbors);
List<Double> qi = dm.getQueryInfo(query);
root.searchKNN(VecPaired.extractTrueVec(query), numNeighbors, boundedList, 0.0, qi);
for(IndexDistPair pm : boundedList)
{
neighbors.add(pm.getIndex());
distances.add(pm.getDist());
}
}
/**
* Computes the distances to the vantage point,
* Sorts the list by distance to the vantage point,
* finds the splitting index, and sets up the parent node.
* @param S the list
* @param node the parent node
* @return the index that was used to split on.
*/
private int sortSplitSet(final List<Pair<Double, Integer>> S, final VPNode node)
{
for (Pair<Double, Integer> S1 : S)
S1.setFirstItem(dm.dist(node.p, S1.getSecondItem(), allVecs, distCache)); //Each point gets its distance to the vantage point
Collections.sort(S, (Pair<Double, Integer> o1, Pair<Double, Integer> o2) -> Double.compare(o1.getFirstItem(), o2.getFirstItem()));
int splitIndex = splitListIndex(S);
node.left_low = S.get(0).getFirstItem();
node.left_high = S.get(splitIndex).getFirstItem();
node.right_low = S.get(splitIndex+1).getFirstItem();
node.right_high = S.get(S.size()-1).getFirstItem();
return splitIndex;
}
/**
* Determines which index to use as the splitting index for the VP radius
* @param S the non empty list of elements
* @return the index that should be used to split on [0, index] belonging to the left, and (index, S.size() ) belonging to the right.
*/
protected int splitListIndex(List<Pair<Double, Integer>> S)
{
return S.size()/2;
}
/**
* Returns the maximum leaf node size. Leaf nodes are used to reduce inefficiency of splitting small lists.
* If a sublist will fit into a leaf node, a leaf node will be created instead of splitting. This is the
* maximum number of points that may be used to construct a leaf node.
*
* @return the maximum leaf node size in the tree
*/
public int getMaxLeafSize()
{
return maxLeafSize;
}
/**
* Sets the maximum leaf node size. Leaf nodes are used to reduce inefficiency of splitting small lists.
* If a sublist will fit into a leaf node, a leaf node will be created instead of splitting. This is the
* maximum number of points that may be used to construct a leaf node. <br>
     * The minimum leaf size is 5 for implementation reasons. If a value less than 5 is given, 5 will be used instead.
*
* @param maxLeafSize the new maximum leaf node size.
*/
public void setMaxLeafSize(int maxLeafSize)
{
this.maxLeafSize = Math.max(5, maxLeafSize);
}
//The probability match is used to store and sort by median distances.
private TreeNode makeVPTree(List<Pair<Double, Integer>> S)
{
if(S.isEmpty())
return null;
else if(S.size() <= maxLeafSize)
{
VPLeaf leaf = new VPLeaf(S);
return leaf;
}
int vpIndex = selectVantagePointIndex(S);
final VPNode node = new VPNode(S.get(vpIndex).getSecondItem());
node.parent_dist = S.get(vpIndex).getFirstItem();
int splitIndex = sortSplitSet(S, node);
        /*
         * Reuse the list and let it get altered. We must compute the right side first.
         * If we altered the left side, the median would move left, and the right side
         * would get thrown off or require additional bookkeeping.
         */
node.right = makeVPTree(S.subList(splitIndex+1, S.size()));
if(node.right != null)
node.right.parent = node;
node.left = makeVPTree(S.subList(0, splitIndex+1));
if(node.left != null)
node.left.parent = node;
return node;
}
private TreeNode makeVPTree(final List<Pair<Double, Integer>> S, final ExecutorService threadpool, final ModifiableCountDownLatch mcdl)
{
if(S.isEmpty())
{
return null;
}
else if(S.size() <= maxLeafSize)
{
VPLeaf leaf = new VPLeaf(S);
return leaf;
}
int vpIndex = selectVantagePointIndex(S);
final VPNode node = new VPNode(S.get(vpIndex).getSecondItem());
node.parent_dist = S.get(vpIndex).getFirstItem();
int splitIndex = sortSplitSet(S, node);
//Start 2 threads, but only 1 of them is "new"
mcdl.countUp();
final List<Pair<Double, Integer>> rightS = S.subList(splitIndex+1, S.size());
final List<Pair<Double, Integer>> leftS = S.subList(0, splitIndex+1);
threadpool.submit(() ->
{
node.right = makeVPTree(rightS, threadpool, mcdl);
if(node.right != null)
node.right.parent = node;
mcdl.countDown();
});
node.left = makeVPTree(leftS, threadpool, mcdl);
if(node.left != null)
node.left.parent = node;
return node;
}
private int selectVantagePointIndex(List<Pair<Double, Integer>> S)
{
int vpIndex;
vpIndex = RandomUtil.getLocalRandom().nextInt(S.size());
return vpIndex;
}
@Override
public SVPTree<V> clone()
{
return new SVPTree<>(this);
}
private abstract class TreeNode implements Cloneable, Serializable, IndexNode
{
VPNode parent;
/**
* Inserts the given data point into the tree structure. The vector
* should have already been added to {@link #allVecs}.
*
* @param x_indx the index of the vector to insert
* @param dist_to_parent the distance of the current point to the parent
* node's vantage point. May be {@link Double#MAX_VALUE} if root node.
*/
public abstract void insert(int x_indx, double dist_to_parent);
/**
* Performs a KNN query on this node.
*
* @param query the query vector
* @param k the number of neighbors to consider
* @param list the storage location on the nearest neighbors
         * @param x the distance from this node's parent vantage point to the query vector.
         * Though not all nodes will use this value, the leaf nodes will - so it should always be given.
         * Initial calls from the root node may choose to use zero.
* @param qi the value of qi
*/
public abstract void searchKNN(Vec query, int k, BoundedSortedList<IndexDistPair> list, double x, List<Double> qi);
/**
* Performs a range query on this node
*
* @param query the query vector
* @param range the maximal distance a point can be from the query point
* to be added to the return list
* @param neighbors the storage location on the data points within the
* range of the query vector
* @param distances the value of distances to each neighbor
         * @param x the distance from this node's parent vantage point to the
         * query vector. Though not all nodes will use this value, the leaf
         * nodes will - so it should always be given. Initial calls from the
         * root node may choose to use zero.
* @param qi the value of qi
*/
public abstract void searchRange(Vec query, double range, List<Integer> neighbors, List<Double> distances, double x, List<Double> qi);
public abstract boolean isLeaf();
@Override
public abstract TreeNode clone();
}
private class VPNode extends TreeNode
{
int p;
double left_low, left_high, right_low, right_high;
TreeNode right, left;
double parent_dist;
public VPNode(int p)
{
this.p = p;
}
public VPNode(VPNode toCopy)
{
this(toCopy.p);
this.left_low = toCopy.left_low;
this.left_high = toCopy.left_high;
this.right_low = toCopy.right_low;
this.right_high = toCopy.right_high;
this.left = cloneChangeContext(toCopy.left);
this.right = cloneChangeContext(toCopy.right);
}
@Override
public boolean isLeaf()
{
return false;
}
@Override
public void insert(int x_indx, double dist_to_parent)
{
double dist = dm.dist(p, x_indx, allVecs, distCache);
TreeNode child;
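            //route the point toward the nearer shell: a distance below the midpoint of left_high and right_low sends it left, otherwise right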
if(dist*2 < left_high+right_low)
{
left_high = Math.max(left_high, dist);
left_low = Math.min(left_low, dist);
child = left = maybeExpandChild(left);
}
else
{
right_high = Math.max(right_high, dist);
right_low = Math.min(right_low, dist);
child = right = maybeExpandChild(right);
}
child.insert(x_indx, dist);
}
/**
* If the given node is a leaf node, this will check if it is time to
* expand the leaf, and return the new non-leaf child. Otherwise, it
* will return the original node.
*
* @param child the child node to potentially expand
* @return the node that should be used as the child node
*/
private TreeNode maybeExpandChild(TreeNode child)
{
//have to use fully qualified path b/c non-static child member
if(child instanceof jsat.linear.vectorcollection.SVPTree.VPLeaf)
{
IntList childs_children = ((VPLeaf) child).points;
if(childs_children.size() <= maxLeafSize*maxLeafSize)
return child;
List<Pair<Double, Integer>> S = new ArrayList<>(childs_children.size());
for(int indx : childs_children)
                    S.add(new Pair<>(Double.MAX_VALUE, indx));//double value will be set appropriately later
int vpIndex = selectVantagePointIndex(S);
final VPNode node = new VPNode(S.get(vpIndex).getSecondItem());
node.parent_dist = S.get(vpIndex).getFirstItem();
node.parent = ((VPLeaf) child).parent;
                //move the VP to the front; its self-distance is zero and we don't want it used in computing bounds.
                Collections.swap(S, 0, vpIndex);
                int splitIndex = sortSplitSet(S.subList(1, S.size()), node)+1;//offset by 1 b/c we skipped the VP, which was moved to the front
node.right = new VPLeaf(S.subList(splitIndex+1, S.size()));
node.right.parent = node;
node.left = new VPLeaf(S.subList(1, splitIndex+1));
node.left.parent = node;
return node;
}
else
return child;
}
private boolean searchInLeft(double x, double tau)
{
if(left == null)
return false;
return left_low-tau <= x && x <= left_high+tau;
}
private boolean searchInRight(double x, double tau)
{
if(right == null)
return false;
return right_low-tau <= x && x <= right_high+tau;
}
@Override
public void searchKNN(Vec query, int k, BoundedSortedList<IndexDistPair> list, double x, List<Double> qi)
{
Deque<VPNode> curNode_stack = new ArrayDeque<VPNode>();
DoubleList distToParrent_stack = new DoubleList();
BooleanList search_left_stack = new BooleanList();
curNode_stack.add(this);
while(!curNode_stack.isEmpty())
{
                if(curNode_stack.size() > search_left_stack.size())//we are descending the tree
{
VPNode node = curNode_stack.peek();
x = dm.dist(node.p, query, qi, allVecs, distCache);
distToParrent_stack.push(x);
if(list.size() < k || x < list.get(k-1).getDist())
list.add(new IndexDistPair(node.p, x));
double tau = list.get(list.size()-1).getDist();
double middle = (node.left_high+node.right_low)*0.5;
boolean leftFirst = x < middle;
//If we search left now, on pop we need to search right
search_left_stack.add(!leftFirst);
if(leftFirst)
{
if(node.searchInLeft(x, tau) || list.size() < k)
{
if(node.left.isLeaf())
node.left.searchKNN(query, k, list, x, qi);
else
{
curNode_stack.push((VPNode) node.left);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
}
else
{
if(node.searchInRight(x, tau) || list.size() < k)
{
if(node.right.isLeaf())
node.right.searchKNN(query, k, list, x, qi);
else
{
curNode_stack.push((VPNode) node.right);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
}
}
                else//we are popping back up the search path
{
                    VPNode node = curNode_stack.pop();//pop, we are definitely done with this node afterward
x = distToParrent_stack.pop();
double tau = list.get(list.size()-1).getDist();
Boolean finishLeft = search_left_stack.pop();
if(finishLeft)
{
if(node.searchInLeft(x, tau) || list.size() < k)
{
if(node.left.isLeaf())
node.left.searchKNN(query, k, list, x, qi);
else
{
curNode_stack.push((VPNode) node.left);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
//else, branch was pruned. Loop back and keep popping
}
else
{
if(node.searchInRight(x, tau) || list.size() < k)
{
if(node.right.isLeaf())
node.right.searchKNN(query, k, list, x, qi);
else
{
curNode_stack.push((VPNode) node.right);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
//else, branch was pruned. Loop back and keep popping
}
}
}
}
public void searchKNN_recurse(Vec query, int k, BoundedSortedList<IndexDistPair> list, double x, List<Double> qi)
{
x = dm.dist(p, query, qi, allVecs, distCache);
if(list.size() < k || x < list.get(k-1).getDist())
list.add(new IndexDistPair(this.p, x));
double tau = list.get(list.size()-1).getDist();
double middle = (this.left_high+this.right_low)*0.5;
if( x < middle)
{
if(searchInLeft(x, tau) || list.size() < k)
this.left.searchKNN(query, k, list, x, qi);
tau = list.get(list.size()-1).getDist();
if(searchInRight(x, tau) || list.size() < k)
this.right.searchKNN(query, k, list, x, qi);
}
else
{
if(searchInRight(x, tau) || list.size() < k)
this.right.searchKNN(query, k, list, x, qi);
tau = list.get(list.size()-1).getDist();
if(searchInLeft(x, tau) || list.size() < k)
this.left.searchKNN(query, k, list, x, qi);
}
}
@Override
public void searchRange(Vec query, double range, List<Integer> neighbors, List<Double> distances, double x, List<Double> qi)
{
x = dm.dist(this.p, query, qi, allVecs, distCache);
if(x <= range)
{
neighbors.add(this.p);
distances.add(x);
}
if (searchInLeft(x, range))
this.left.searchRange(query, range, neighbors, distances, x, qi);
if (searchInRight(x, range))
this.right.searchRange(query, range, neighbors, distances, x, qi);
}
@Override
public TreeNode clone()
{
return new VPNode(this);
}
@Override
public VPNode getParrent()
{
return parent;
}
@Override
public double maxNodeDistance(IndexNode other)
{
if(other instanceof jsat.linear.vectorcollection.SVPTree.VPNode)
{
jsat.linear.vectorcollection.SVPTree.VPNode o = (jsat.linear.vectorcollection.SVPTree.VPNode) other;
Vec ov = o.getVec(o.p);
List<Double> qi = dm.getQueryInfo(ov);
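                //triangle inequality: every descendant lies within right_high of its VP, so no pair of points can be farther apart than d(p, o.p) plus both radii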
return dm.dist(this.p, ov, qi, allVecs, distCache) + this.right_high + o.right_high;
}
else
{
// VPLeaf c = (jsat.linear.vectorcollection.SVPTree.VPLeaf) other;
// VPNode o = c.getParrent();
// Vec ov = o.getVec(o.p);
// List<Double> qi = dm.getQueryInfo(ov);
// return dm.dist(this.p, ov, qi, allVecs, distCache) + this.right_high + c.getParentDistance();
return Double.POSITIVE_INFINITY;
}
}
@Override
public double minNodeDistance(IndexNode other)
{
if(other instanceof jsat.linear.vectorcollection.SVPTree.VPNode)
{
jsat.linear.vectorcollection.SVPTree.VPNode o = (jsat.linear.vectorcollection.SVPTree.VPNode) other;
Vec ov = o.getVec(o.p);
List<Double> qi = dm.getQueryInfo(ov);
return max(dm.dist(this.p, ov, qi, allVecs, distCache) - this.right_high - o.right_high, 0);
}
else
{
// VPLeaf c = (jsat.linear.vectorcollection.SVPTree.VPLeaf) other;
// VPNode o = c.getParrent();
// Vec ov = o.getVec(o.p);
// List<Double> qi = dm.getQueryInfo(ov);
// return dm.dist(this.p, ov, qi, allVecs, distCache) - this.right_high - c.getParentDistance();
return 0;
}
}
@Override
public double[] minMaxDistance(IndexNode other)
{
if(other instanceof jsat.linear.vectorcollection.SVPTree.VPNode)
{
jsat.linear.vectorcollection.SVPTree.VPNode o = (jsat.linear.vectorcollection.SVPTree.VPNode) other;
Vec ov = o.getVec(o.p);
List<Double> qi = dm.getQueryInfo(ov);
double d = dm.dist(this.p, ov, qi, allVecs, distCache);
return new double[]
{
max(d - this.right_high - o.right_high, 0),
d + this.right_high + o.right_high
};
}
else
{
return new double[]{0, Double.POSITIVE_INFINITY};
}
}
@Override
public double minNodeDistance(int other)
{
return max(dm.dist(p, other, allVecs, distCache) - right_low, 0);
}
@Override
public double getParentDistance()
{
return parent_dist;
}
@Override
public double furthestPointDistance()
{
            return 0;//We have one point, the vantage point itself, so the distance is 0.
}
@Override
public double furthestDescendantDistance()
{
return right_high;
}
@Override
public int numChildren()
{
return 2;
}
@Override
public IndexNode getChild(int indx)
{
switch(indx)
{
case 0:
return left;
case 1:
return right;
default:
throw new IndexOutOfBoundsException();
}
}
@Override
public Vec getVec(int indx)
{
return get(indx);
}
@Override
public int numPoints()
{
return 0;
}
@Override
public int getPoint(int indx)
{
throw new IndexOutOfBoundsException("VPNode has only one point, can't access index " + indx);
}
}
private class VPLeaf extends TreeNode
{
/**
* The index in {@link #allVecs} for each data point stored in this Leaf node
*/
IntList points;
/**
* The distance of each point in this leaf to the parent node we came from.
*/
DoubleList bounds;
public VPLeaf(List<Pair<Double, Integer>> points)
{
this.points = new IntList(points.size());
this.bounds = new DoubleList(points.size());
for(int i = 0; i < points.size(); i++)
{
this.points.add(points.get(i).getSecondItem());
this.bounds.add(points.get(i).getFirstItem());
}
}
public VPLeaf(VPLeaf toCopy)
{
this.bounds = new DoubleList(toCopy.bounds);
this.points = new IntList(toCopy.points);
}
@Override
public void insert(int x_indx, double dist_to_parent)
{
this.points.add(x_indx);
this.bounds.add(dist_to_parent);
}
@Override
public void searchKNN(Vec query, int k, BoundedSortedList<IndexDistPair> list, double x, List<Double> qi)
{
double dist = -1;
            //If this leaf is the ONLY node, x will be passed as 0.0 and tau starts at Double.MAX_VALUE, so the bound check below always passes
double tau = list.isEmpty() ? Double.MAX_VALUE : list.get(list.size()-1).getDist();
for (int i = 0; i < points.size(); i++)
{
int point_i = points.getI(i);
double bound_i = bounds.getD(i);
if (list.size() < k)
{
list.add(new IndexDistPair(point_i, dm.dist(point_i, query, qi, allVecs, distCache)));
tau = list.get(list.size() - 1).getDist();
}
                else if (bound_i - tau <= x && x <= bound_i + tau)//Bound check against the distance to our parent node, provided by x
if ((dist = dm.dist(point_i, query, qi, allVecs, distCache)) < tau)
{
list.add(new IndexDistPair(point_i, dist));
tau = list.get(list.size() - 1).getDist();
}
}
}
@Override
public void searchRange(Vec query, double range, List<Integer> neighbors, List<Double> distances, double x, List<Double> qi)
{
double dist = Double.MAX_VALUE;
for (int i = 0; i < points.size(); i++)
{
int point_i = points.getI(i);
double bound_i = bounds.getD(i);
                if (bound_i - range <= x && x <= bound_i + range)//Bound check against the distance to our parent node, provided by x
if ((dist = dm.dist(point_i, query, qi, allVecs, distCache)) < range)
{
neighbors.add(point_i);
distances.add(dist);
}
}
}
@Override
public boolean isLeaf()
{
return true;
}
@Override
public TreeNode clone()
{
return new VPLeaf(this);
}
@Override
public VPNode getParrent()
{
return parent;
}
@Override
public double maxNodeDistance(IndexNode other)
{
return Double.POSITIVE_INFINITY;
// if(other instanceof jsat.linear.vectorcollection.SVPTree.VPNode)
// {
// return other.maxNodeDistance(this);
// }
// else
// {
// VPLeaf c = (jsat.linear.vectorcollection.SVPTree.VPLeaf) other;
// VPNode o = c.getParrent();
// Vec ov = o.getVec(o.p);
// List<Double> qi = dm.getQueryInfo(ov);
// return dm.dist(this.getParrent().p, ov, qi, allVecs, distCache) + this.getParentDistance() + c.getParentDistance();
// }
}
@Override
public double minNodeDistance(IndexNode other)
{
return 0;
// if(other instanceof jsat.linear.vectorcollection.SVPTree.VPNode)
// {
// return other.minNodeDistance(this);
// }
// else
// {
// VPLeaf c = (jsat.linear.vectorcollection.SVPTree.VPLeaf) other;
// VPNode o = c.getParrent();
// Vec ov = o.getVec(o.p);
// List<Double> qi = dm.getQueryInfo(ov);
// return dm.dist(this.getParrent().p, ov, qi, allVecs, distCache) - this.getParentDistance() - c.getParentDistance();
// }
}
@Override
public double minNodeDistance(int other)
{
//Leaf node, return a value that makes caller go brute-force
return 0.0;
}
@Override
public double getParentDistance()
{
return furthestDescendantDistance();
// return bounds.stream().mapToDouble(d->d).max().orElse(Double.POSITIVE_INFINITY);
}
@Override
public double furthestPointDistance()
{
return furthestDescendantDistance();
// return bounds.stream().mapToDouble(d->d).max().orElse(Double.POSITIVE_INFINITY);
}
@Override
public double furthestDescendantDistance()
{
if(bounds.isEmpty())
return 0;
return bounds.max();
}
@Override
public int numChildren()
{
return 0;
}
@Override
public IndexNode getChild(int indx)
{
throw new IndexOutOfBoundsException("Leaf nodes have no children");
}
@Override
public Vec getVec(int indx)
{
return get(indx);
}
@Override
public int numPoints()
{
return points.size();
}
@Override
public int getPoint(int indx)
{
return points.getI(indx);
}
}
}
| 36,107 | 34.022308 | 143 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/ScoreDT.java | /*
* Copyright (C) 2018 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
/**
 * A scoring function used by dual-tree traversal algorithms to decide whether
 * a pair of query and reference nodes can be pruned from the search.
 *
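 * <br><br>
 * A minimal sketch of a custom pruning rule (this exact rule is illustrative,
 * not one defined by JSAT): prune any pair of nodes whose closest possible
 * distance exceeds a fixed radius {@code eps}:
 * <pre>{@code
 * double eps = 1.0; //hypothetical pruning radius
 * ScoreDT pruneByRadius = (ref, query) ->
 * {
 *     double min = query.minNodeDistance(ref);
 *     return min > eps ? Double.POSITIVE_INFINITY : min;
 * };
 * }</pre>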
* @author Edward Raff
*/
public interface ScoreDT
{
/**
     * Computes the score for a pair of reference and query nodes.
     *
     * @param ref the reference node of the pair
     * @param query the query node of the pair
     * @return the score for the pair, or {@link Double#POSITIVE_INFINITY} if the node should be pruned.
*/
public double score(IndexNode ref, IndexNode query);
/**
     * This method re-scores a given reference-query node pair. By default this
* simply returns the original score that was given and does no computation.
* If the given original score does not look valid (is less than zero), the
* score will be re-computed. Some algorithms may choose to implement this
* method when pruning is best done after initial depth-first traversals
* have already been completed of other branches.
*
     * @param ref the reference node of the pair
     * @param query the query node of the pair
     * @param origScore the score computed for this pair on a previous visit, or a negative value if none is available
     * @return the score for the pair, or {@link Double#POSITIVE_INFINITY} if it should be pruned
*/
default double score(IndexNode ref, IndexNode query, double origScore)
{
if(origScore < 0)
return score(ref, query);
else
return origScore;
}
}
| 1,746 | 30.763636 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/ScoreDTLazy.java | /*
* Copyright (C) 2018 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.linear.vectorcollection;
/**
 * A variant of {@link ScoreDT} for algorithms that are best served by lazily
 * re-scoring a node pair on every visit via
 * {@link #score(IndexNode, IndexNode, double) }, rather than reusing a
 * previously computed score.
 *
* @author Edward Raff
*/
public interface ScoreDTLazy extends ScoreDT
{
@Override
public double score(IndexNode ref, IndexNode query, double origScore);
@Override
public default double score(IndexNode ref, IndexNode query)
{
return score(ref, query, -1);
}
}
| 1,036 | 27.805556 | 74 | java |
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/VPTree.java | package jsat.linear.vectorcollection;
import java.io.Serializable;
import static java.lang.Math.min;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.List;
import java.util.Random;
import java.util.Stack;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.utils.BooleanList;
import jsat.utils.BoundedSortedList;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
import jsat.utils.IntList;
import jsat.utils.ModifiableCountDownLatch;
import jsat.utils.Pair;
import jsat.utils.SimpleList;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* Provides an implementation of Vantage Point Trees, as described in
* "Data Structures and Algorithms for Nearest Neighbor Search in General Metric Spaces"
* by Peter N. Yianilos
* <br>
* VPTrees are more expensive to create, requiring O(n log n) distance computations. However,
 * they work well for high-dimensional data sets, and provide O(log n) query time for
* {@link #search(jsat.linear.Vec, int) }
* <br>
* Note: In the original paper, the VP-tree is detailed, and then enhanced to the VPs-tree,
* and the VPsb-tree, which each add additional optimizations. This implementation is equivalent
* to the VPsb-tree presented in the original paper.
*
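 * <br><br>
 * A minimal usage sketch (hypothetical random data; the sampling-based
 * vantage point selection from the paper is requested explicitly):
 * <pre>{@code
 * List<Vec> vecs = new ArrayList<>();
 * Random rand = new Random(42);
 * for (int i = 0; i < 1000; i++)
 *     vecs.add(new DenseVector(new double[]{rand.nextGaussian(), rand.nextGaussian()}));
 * VPTree<Vec> tree = new VPTree<>(vecs, new EuclideanDistance(), VPTree.VPSelection.Sampling);
 * IntList neighbors = new IntList();
 * DoubleList distances = new DoubleList();
 * tree.search(vecs.get(0), 10, neighbors, distances); //exact 10-NN query
 * }</pre>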
* @author Edward Raff
*/
public class VPTree<V extends Vec> implements IncrementalCollection<V>, DualTree<V>
{
private static final long serialVersionUID = -7271540108746353762L;
private DistanceMetric dm;
private List<Double> distCache;
private List<V> allVecs;
private Random rand;
private int sampleSize;
private int searchIterations;
protected volatile TreeNode root;
private VPSelection vpSelection;
private int size;
private int maxLeafSize = 5;
@Override
public IndexNode getRoot()
{
return root;
}
public enum VPSelection
{
/**
* Uses the sampling method described in the original paper
*/
Sampling,
/**
* Randomly selects a new point to be the Vantage Point
*/
Random
}
public VPTree(List<V> list, DistanceMetric dm, VPSelection vpSelection, Random rand, int sampleSize, int searchIterations, boolean parallel)
{
this.vpSelection = vpSelection;
        this.rand = rand;
        this.sampleSize = sampleSize;
        this.searchIterations = searchIterations;
build(parallel, list, dm);
}
public VPTree(List<V> list, DistanceMetric dm, VPSelection vpSelection, Random rand, int sampleSize, int searchIterations)
{
this(list, dm, vpSelection, rand, sampleSize, searchIterations, false);
}
public VPTree(List<V> list, DistanceMetric dm, VPSelection vpSelection)
{
this(list, dm, vpSelection, RandomUtil.getRandom(), 80, 40);
}
public VPTree(List<V> list, DistanceMetric dm, boolean parallel)
{
this(list, dm, VPSelection.Random, RandomUtil.getRandom(), 80, 40, parallel);
}
public VPTree(List<V> list, DistanceMetric dm)
{
this(list, dm, VPSelection.Random);
}
public VPTree()
{
this(new EuclideanDistance());
}
public VPTree(DistanceMetric dm)
{
this(dm, VPSelection.Random);
}
public VPTree(DistanceMetric dm, VPSelection sampling)
{
this.dm = dm;
if(!dm.isSubadditive())
throw new RuntimeException("VPTree only supports metrics that support the triangle inequality");
this.rand = RandomUtil.getRandom();
this.sampleSize = 80;
this.searchIterations = 40;
this.size = 0;
this.vpSelection = sampling;
this.allVecs = new ArrayList<>();
if(dm.supportsAcceleration())
this.distCache = new DoubleList();
}
/**
* Copy constructor
* @param toClone the object to copy
*/
protected VPTree(VPTree<V> toClone)
{
this.dm = toClone.dm.clone();
this.rand = toClone.rand == null ? null : new Random(toClone.rand.nextInt());
this.sampleSize = toClone.sampleSize;
this.searchIterations = toClone.searchIterations;
this.root = cloneChangeContext(toClone.root);
this.vpSelection = toClone.vpSelection;
this.size = toClone.size;
this.maxLeafSize = toClone.maxLeafSize;
if(toClone.allVecs != null)
this.allVecs = new ArrayList<>(toClone.allVecs);
if(toClone.distCache != null)
this.distCache = new DoubleList(toClone.distCache);
}
@Override
public void build(boolean parallel, List<V> list, DistanceMetric dm)
{
setDistanceMetric(dm);
if(!dm.isSubadditive())
throw new RuntimeException("VPTree only supports metrics that support the triangle inequality");
this.size = list.size();
this.allVecs = list;
distCache = dm.getAccelerationCache(allVecs, parallel);
        //Use simple list so both halves can be modified simultaneously
List<Pair<Double, Integer>> tmpList = new SimpleList<>(list.size());
for(int i = 0; i < allVecs.size(); i++)
tmpList.add(new Pair<>(-1.0, i));
if(!parallel)
this.root = makeVPTree(tmpList);
else
{
ExecutorService threadpool = ParallelUtils.getNewExecutor(parallel);
ModifiableCountDownLatch mcdl = new ModifiableCountDownLatch(1);
this.root = makeVPTree(tmpList, threadpool, mcdl);
mcdl.countDown();
try
{
mcdl.await();
}
catch (InterruptedException ex)
{
Logger.getLogger(VPTree.class.getName()).log(Level.SEVERE, null, ex);
System.err.println("Falling back to single threaded VPTree constructor");
tmpList.clear();
for(int i = 0; i < list.size(); i++)
tmpList.add(new Pair<>(-1.0, i));
this.root = makeVPTree(tmpList);
}
finally
{
threadpool.shutdownNow();
}
}
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
this.dm = dm;
}
@Override
public DistanceMetric getDistanceMetric()
{
return dm;
}
private TreeNode cloneChangeContext(TreeNode toClone)
{
if (toClone != null)
if (toClone instanceof jsat.linear.vectorcollection.VPTree.VPLeaf)
return new VPLeaf((VPLeaf) toClone);
else
return new VPNode((VPNode) toClone);
return null;
}
@Override
public int size()
{
return size;
}
@Override
public V get(int indx)
{
return allVecs.get(indx);
}
@Override
public void insert(V x)
{
int indx = size++;
allVecs.add(x);
if(distCache != null)
distCache.addAll(dm.getQueryInfo(x));
if(root == null)
{
ArrayList<Pair<Double, Integer>> list = new ArrayList<>();
list.add(new Pair<>(Double.MAX_VALUE, indx));
root = new VPLeaf(list);
return;
}
        //else, do a normal insert
root.insert(indx, Double.MAX_VALUE);
if(root instanceof jsat.linear.vectorcollection.VPTree.VPLeaf)//is root a leaf?
{
VPLeaf leaf = (VPLeaf) root;
if(leaf.points.size() > maxLeafSize*maxLeafSize)//check to expand
{
//hacky, but works
int orig_leaf_isze = maxLeafSize;
maxLeafSize = maxLeafSize*maxLeafSize;//call normal construct with adjusted leaf size to stop expansion
ArrayList<Pair<Double, Integer>> S = new ArrayList<>();
for(int i = 0; i < leaf.points.size(); i++)
S.add(new Pair<>(Double.MAX_VALUE, leaf.points.getI(i)));
root = makeVPTree(S);
                maxLeafSize = orig_leaf_isze;//restore the original max leaf size
}
}
//else, normal non-leaf root insert handles expansion when needed
}
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
List<Double> qi = dm.getQueryInfo(query);
root.searchRange(VecPaired.extractTrueVec(query), range, neighbors, distances, 0.0, qi);
IndexTable it = new IndexTable(distances);
it.apply(neighbors);
it.apply(distances);
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
BoundedSortedList<IndexDistPair> boundedList= new BoundedSortedList<>(numNeighbors, numNeighbors);
List<Double> qi = dm.getQueryInfo(query);
root.searchKNN(VecPaired.extractTrueVec(query), numNeighbors, boundedList, 0.0, qi);
for(IndexDistPair pm : boundedList)
{
neighbors.add(pm.getIndex());
distances.add(pm.getDist());
}
}
@Override
public void search(Vec query, int numNeighbors, double range, List<Integer> neighbors, List<Double> distances)
{
BoundedSortedList<IndexDistPair> boundedList= new BoundedSortedList<>(numNeighbors, numNeighbors);
List<Double> qi = dm.getQueryInfo(query);
root.searchKNN_range(VecPaired.extractTrueVec(query), numNeighbors, range, boundedList, 0.0, qi);
for(IndexDistPair pm : boundedList)
{
neighbors.add(pm.getIndex());
distances.add(pm.getDist());
}
}
/**
* Computes the distances to the vantage point,
* Sorts the list by distance to the vantage point,
* finds the splitting index, and sets up the parent node.
* @param S the list
* @param node the parent node
* @return the index that was used to split on.
*/
private int sortSplitSet(final List<Pair<Double, Integer>> S, final VPNode node)
{
for (Pair<Double, Integer> S1 : S)
S1.setFirstItem(dm.dist(node.p, S1.getSecondItem(), allVecs, distCache)); //Each point gets its distance to the vantage point
Collections.sort(S, (Pair<Double, Integer> o1, Pair<Double, Integer> o2) -> Double.compare(o1.getFirstItem(), o2.getFirstItem()));
int splitIndex = splitListIndex(S);
node.left_low = S.get(0).getFirstItem();
node.left_high = S.get(splitIndex).getFirstItem();
node.right_low = S.get(splitIndex+1).getFirstItem();
node.right_high = S.get(S.size()-1).getFirstItem();
return splitIndex;
}
/**
* Determines which index to use as the splitting index for the VP radius
* @param S the non empty list of elements
* @return the index that should be used to split on [0, index] belonging to the left, and (index, S.size() ) belonging to the right.
*/
protected int splitListIndex(List<Pair<Double, Integer>> S)
{
return S.size()/2;
}
/**
* Returns the maximum leaf node size. Leaf nodes are used to reduce inefficiency of splitting small lists.
* If a sublist will fit into a leaf node, a leaf node will be created instead of splitting. This is the
* maximum number of points that may be used to construct a leaf node.
*
* @return the maximum leaf node size in the tree
*/
public int getMaxLeafSize()
{
return maxLeafSize;
}
/**
* Sets the maximum leaf node size. Leaf nodes are used to reduce inefficiency of splitting small lists.
* If a sublist will fit into a leaf node, a leaf node will be created instead of splitting. This is the
* maximum number of points that may be used to construct a leaf node. <br>
     * The minimum leaf size is 5 for implementation reasons. If a value less than 5 is given, 5 will be used instead.
*
* @param maxLeafSize the new maximum leaf node size.
*/
public void setMaxLeafSize(int maxLeafSize)
{
this.maxLeafSize = Math.max(5, maxLeafSize);
}
//The probability match is used to store and sort by median distances.
private TreeNode makeVPTree(List<Pair<Double, Integer>> S)
{
if(S.isEmpty())
return null;
else if(S.size() <= maxLeafSize)
{
VPLeaf leaf = new VPLeaf(S);
return leaf;
}
int vpIndex = selectVantagePointIndex(S);
final VPNode node = new VPNode(S.get(vpIndex).getSecondItem());
node.parent_dist = S.get(vpIndex).getFirstItem();
        //move the VP to the front; its self-distance is zero and we don't want it used in computing bounds.
        Collections.swap(S, 0, vpIndex);
        int splitIndex = sortSplitSet(S.subList(1, S.size()), node)+1;//offset by 1 b/c we skipped the VP, which was moved to the front
        /*
         * Reuse the list and let it get altered. We must compute the right side first.
         * If we altered the left side, the median would move left, and the right side
         * would get thrown off or require additional bookkeeping.
         */
node.right = makeVPTree(S.subList(splitIndex+1, S.size()));
if(node.right != null)
node.right.parent = node;
node.left = makeVPTree(S.subList(1, splitIndex+1));
if(node.left != null)
node.left.parent = node;
return node;
}
private TreeNode makeVPTree(final List<Pair<Double, Integer>> S, final ExecutorService threadpool, final ModifiableCountDownLatch mcdl)
{
if(S.isEmpty())
{
return null;
}
else if(S.size() <= maxLeafSize)
{
VPLeaf leaf = new VPLeaf(S);
return leaf;
}
int vpIndex = selectVantagePointIndex(S);
final VPNode node = new VPNode(S.get(vpIndex).getSecondItem());
node.parent_dist = S.get(vpIndex).getFirstItem();
        //move the VP to the front; its self-distance is zero and we don't want it used in computing bounds.
        Collections.swap(S, 0, vpIndex);
        int splitIndex = sortSplitSet(S.subList(1, S.size()), node)+1;//offset by 1 b/c we skipped the VP, which was moved to the front
//Start 2 threads, but only 1 of them is "new"
mcdl.countUp();
final List<Pair<Double, Integer>> rightS = S.subList(splitIndex+1, S.size());
final List<Pair<Double, Integer>> leftS = S.subList(1, splitIndex+1);
threadpool.submit(() ->
{
node.right = makeVPTree(rightS, threadpool, mcdl);
if(node.right != null)
node.right.parent = node;
mcdl.countDown();
});
node.left = makeVPTree(leftS, threadpool, mcdl);
if(node.left != null)
node.left.parent = node;
return node;
}
private int selectVantagePointIndex(List<Pair<Double, Integer>> S)
{
int vpIndex;
if (vpSelection == VPSelection.Random)
vpIndex = rand.nextInt(S.size());
else//Sampling
{
List<Integer> samples = new IntList(sampleSize);
if (sampleSize <= S.size())
for (int i = 0; i < sampleSize; i++)
samples.add(S.get(i).getSecondItem());
else
for (int i = 0; i < sampleSize; i++)
samples.add(S.get(rand.nextInt(S.size())).getSecondItem());
double[] distances = new double[sampleSize];
int bestVP = -1;
double bestSpread = Double.NEGATIVE_INFINITY;
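            //the best vantage point is the candidate whose distances to the sample have the largest spread (sum of absolute deviations from the median)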
for (int i = 0; i < Math.min(searchIterations, S.size()); i++)
{
//When low on samples, just brute force!
int candIndx = searchIterations <= S.size() ? i : rand.nextInt(S.size());
int candV = S.get(candIndx).getSecondItem();
for (int j = 0; j < samples.size(); j++)
distances[j] = dm.dist(candV, samples.get(j), allVecs, distCache);
Arrays.sort(distances);
double median = distances[distances.length / 2];
double spread = 0;
for (double distance : distances)
spread += Math.abs(distance - median);
if (spread > bestSpread)
{
bestSpread = spread;
bestVP = candIndx;
}
}
vpIndex = bestVP;
}
return vpIndex;
}
/**
* Determines what point from the data set will become a vantage point, and removes it from the list
* @param S the set to select a vantage point from
* @return the index of thevantage point removed from the set
*/
private int selectVantagePoint(List<Pair<Double, Integer>> S)
{
int vpIndex = selectVantagePointIndex(S);
return S.get(vpIndex).getSecondItem();
}
@Override
public VPTree<V> clone()
{
return new VPTree<>(this);
}
@Override
public List<Double> getAccelerationCache()
{
return distCache;
}
private abstract class TreeNode implements Cloneable, Serializable, IndexNode
{
VPNode parent;
/**
* Inserts the given data point into the tree structure. The vector
* should have already been added to {@link #allVecs}.
*
* @param x_indx the index of the vector to insert
* @param dist_to_parent the distance of the current point to the parent
* node's vantage point. May be {@link Double#MAX_VALUE} if root node.
*/
public abstract void insert(int x_indx, double dist_to_parent);
/**
* Performs a KNN query on this node.
*
* @param query the query vector
* @param k the number of neighbors to consider
* @param list the storage location on the nearest neighbors
         * @param x the distance from this node's parent vantage point to the query vector.
         * Though not all nodes will use this value, the leaf nodes will - so it should always be given.
         * Initial calls from the root node may choose to use zero.
* @param qi the value of qi
*/
public abstract void searchKNN(Vec query, int k, BoundedSortedList<IndexDistPair> list, double x, List<Double> qi);
/**
* Performs a KNN query on this node.
*
* @param query the query vector
* @param k the number of neighbors to consider
* @param radius the maximal distance a point can be from the query point
* to be added to the return list
* @param list the storage location on the nearest neighbors
         * @param x the distance from this node's parent vantage point to the query vector.
         * Though not all nodes will use this value, the leaf nodes will - so it should always be given.
         * Initial calls from the root node may choose to use zero.
* @param qi the value of qi
*/
public abstract void searchKNN_range(Vec query, int k, double radius, BoundedSortedList<IndexDistPair> list, double x, List<Double> qi);
/**
* Performs a range query on this node
*
* @param query the query vector
* @param range the maximal distance a point can be from the query point
* to be added to the return list
* @param neighbors the storage location on the data points within the
* range of the query vector
* @param distances the value of distances to each neighbor
         * @param x the distance from this node's parent vantage point to the
         * query vector. Though not all nodes will use this value, the leaf
         * nodes will - so it should always be given. Initial calls from the
         * root node may choose to use zero.
* @param qi the value of qi
*/
public abstract void searchRange(Vec query, double range, List<Integer> neighbors, List<Double> distances, double x, List<Double> qi);
public abstract boolean isLeaf();
@Override
public abstract TreeNode clone();
public abstract int size();
}
private class VPNode extends TreeNode
{
int p;
double left_low, left_high, right_low, right_high;
TreeNode right, left;
double parent_dist;
public VPNode(int p)
{
this.p = p;
}
public VPNode(VPNode toCopy)
{
this(toCopy.p);
this.left_low = toCopy.left_low;
this.left_high = toCopy.left_high;
this.right_low = toCopy.right_low;
this.right_high = toCopy.right_high;
this.left = cloneChangeContext(toCopy.left);
this.right = cloneChangeContext(toCopy.right);
}
@Override
public boolean isLeaf()
{
return false;
}
@Override
public void insert(int x_indx, double dist_to_parent)
{
double dist = dm.dist(p, x_indx, allVecs, distCache);
TreeNode child;
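            //route the point toward the nearer shell: a distance below the midpoint of left_high and right_low sends it left, otherwise right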
if(dist*2 < left_high+right_low)
{
left_high = Math.max(left_high, dist);
left_low = Math.min(left_low, dist);
child = left = maybeExpandChild(left);
}
else
{
right_high = Math.max(right_high, dist);
right_low = Math.min(right_low, dist);
child = right = maybeExpandChild(right);
}
child.insert(x_indx, dist);
}
/**
* If the given node is a leaf node, this will check if it is time to
* expand the leaf, and return the new non-leaf child. Otherwise, it
* will return the original node.
*
* @param child the child node to potentially expand
* @return the node that should be used as the child node
*/
private TreeNode maybeExpandChild(TreeNode child)
{
//have to use fully qualified path b/c non-static child member
if(child instanceof jsat.linear.vectorcollection.VPTree.VPLeaf)
{
IntList childs_children = ((VPLeaf) child).points;
if(childs_children.size() <= maxLeafSize*maxLeafSize)
return child;
List<Pair<Double, Integer>> S = new ArrayList<>(childs_children.size());
for(int indx : childs_children)
                    S.add(new Pair<>(Double.MAX_VALUE, indx));//double value will be set appropriately later
int vpIndex = selectVantagePointIndex(S);
final VPNode node = new VPNode(S.get(vpIndex).getSecondItem());
node.parent_dist = S.get(vpIndex).getFirstItem();
node.parent = ((VPLeaf) child).parent;
                //move the VP to the front; its self-distance is zero and we don't want it used in computing bounds.
                Collections.swap(S, 0, vpIndex);
                int splitIndex = sortSplitSet(S.subList(1, S.size()), node)+1;//offset by 1 b/c we skipped the VP, which was moved to the front
node.right = new VPLeaf(S.subList(splitIndex+1, S.size()));
node.right.parent = node;
node.left = new VPLeaf(S.subList(1, splitIndex+1));
node.left.parent = node;
return node;
}
else
return child;
}
private boolean searchInLeft(double x, double tau)
{
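            //by the triangle inequality, a point within tau of the query can only fall in the left partition if x is in [left_low - tau, left_high + tau]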
if(left == null)
return false;
return left_low-tau <= x && x <= left_high+tau;
}
private boolean searchInRight(double x, double tau)
{
if(right == null)
return false;
return right_low-tau <= x && x <= right_high+tau;
}
@Override
public void searchKNN(Vec query, int k, BoundedSortedList<IndexDistPair> list, double x, List<Double> qi)
{
Deque<VPNode> curNode_stack = new ArrayDeque<>();
DoubleList distToParrent_stack = new DoubleList();
BooleanList search_left_stack = new BooleanList();
curNode_stack.add(this);
while(!curNode_stack.isEmpty())
{
                if(curNode_stack.size() > search_left_stack.size())//we are descending the tree
{
VPNode node = curNode_stack.peek();
x = dm.dist(node.p, query, qi, allVecs, distCache);
distToParrent_stack.push(x);
if(list.size() < k || x < list.get(k-1).getDist())
list.add(new IndexDistPair(node.p, x));
double tau = list.get(list.size()-1).getDist();
double middle = (node.left_high+node.right_low)*0.5;
boolean leftFirst = x < middle;
//If we search left now, on pop we need to search right
search_left_stack.add(!leftFirst);
if(leftFirst)
{
if(node.searchInLeft(x, tau) || list.size() < k)
{
if(node.left.isLeaf())
node.left.searchKNN(query, k, list, x, qi);
else
{
curNode_stack.push((VPNode) node.left);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
}
else
{
if(node.searchInRight(x, tau) || list.size() < k)
{
if(node.right.isLeaf())
node.right.searchKNN(query, k, list, x, qi);
else
{
curNode_stack.push((VPNode) node.right);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
}
}
                else//we are popping back up the search path
{
                    VPNode node = curNode_stack.pop();//pop, we are definitely done with this node afterward
x = distToParrent_stack.pop();
double tau = list.get(list.size()-1).getDist();
Boolean finishLeft = search_left_stack.pop();
if(finishLeft)
{
if(node.searchInLeft(x, tau) || list.size() < k)
{
if(node.left.isLeaf())
node.left.searchKNN(query, k, list, x, qi);
else
{
curNode_stack.push((VPNode) node.left);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
//else, branch was pruned. Loop back and keep popping
}
else
{
if(node.searchInRight(x, tau) || list.size() < k)
{
if(node.right.isLeaf())
node.right.searchKNN(query, k, list, x, qi);
else
{
curNode_stack.push((VPNode) node.right);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
//else, branch was pruned. Loop back and keep popping
}
}
}
}
public void searchKNN_recurse(Vec query, int k, BoundedSortedList<IndexDistPair> list, double x, List<Double> qi)
{
x = dm.dist(p, query, qi, allVecs, distCache);
if(list.size() < k || x < list.get(k-1).getDist())
list.add(new IndexDistPair(this.p, x));
double tau = list.get(list.size()-1).getDist();
double middle = (this.left_high+this.right_low)*0.5;
if( x < middle)
{
if(searchInLeft(x, tau) || list.size() < k)
this.left.searchKNN(query, k, list, x, qi);
tau = list.get(list.size()-1).getDist();
if(searchInRight(x, tau) || list.size() < k)
this.right.searchKNN(query, k, list, x, qi);
}
else
{
if(searchInRight(x, tau) || list.size() < k)
this.right.searchKNN(query, k, list, x, qi);
tau = list.get(list.size()-1).getDist();
if(searchInLeft(x, tau) || list.size() < k)
this.left.searchKNN(query, k, list, x, qi);
}
}
@Override
public void searchKNN_range(Vec query, int k, double radius, BoundedSortedList<IndexDistPair> list, double x, List<Double> qi)
{
Deque<VPNode> curNode_stack = new ArrayDeque<>();
DoubleList distToParrent_stack = new DoubleList();
BooleanList search_left_stack = new BooleanList();
curNode_stack.add(this);
while(!curNode_stack.isEmpty())
{
                if(curNode_stack.size() > search_left_stack.size())//we are descending the tree
{
VPNode node = curNode_stack.peek();
x = dm.dist(node.p, query, qi, allVecs, distCache);
distToParrent_stack.push(x);
if(x < radius && (list.size() < k || x < list.get(k-1).getDist()))
list.add(new IndexDistPair(node.p, x));
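                    //prune with the tighter of the user-supplied radius and the current k-th nearest distance found so far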
double tau = list.size() < k ? radius : min(radius, list.get(list.size()-1).getDist());
double middle = (node.left_high+node.right_low)*0.5;
boolean leftFirst = x < middle;
//If we search left now, on pop we need to search right
search_left_stack.add(!leftFirst);
if(leftFirst)
{
if(node.searchInLeft(x, tau))
{
if(node.left.isLeaf())
node.left.searchKNN_range(query, k, radius, list, x, qi);
else
{
curNode_stack.push((VPNode) node.left);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
}
else
{
if(node.searchInRight(x, tau))
{
if(node.right.isLeaf())
node.right.searchKNN_range(query, k, radius, list, x, qi);
else
{
curNode_stack.push((VPNode) node.right);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
}
}
                else//we are popping back up the search path
{
                    VPNode node = curNode_stack.pop();//pop, we are definitely done with this node afterward
x = distToParrent_stack.pop();
double tau = list.size() < k ? radius : min(radius, list.get(list.size()-1).getDist());
Boolean finishLeft = search_left_stack.pop();
if(finishLeft)
{
if(node.searchInLeft(x, tau))
{
if(node.left.isLeaf())
node.left.searchKNN_range(query, k, radius, list, x, qi);
else
{
curNode_stack.push((VPNode) node.left);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
//else, branch was pruned. Loop back and keep popping
}
else
{
if(node.searchInRight(x, tau))
{
if(node.right.isLeaf())
node.right.searchKNN_range(query, k, radius, list, x, qi);
else
{
curNode_stack.push((VPNode) node.right);
                                continue;//curNode_stack will now have a size 1 greater than search_left_stack
}
}
//else, branch was pruned. Loop back and keep popping
}
}
}
}
@Override
public void searchRange(Vec query, double range, List<Integer> neighbors, List<Double> distances, double x, List<Double> qi)
{
x = dm.dist(this.p, query, qi, allVecs, distCache);
if(x <= range)
{
neighbors.add(this.p);
distances.add(x);
}
if (searchInLeft(x, range))
this.left.searchRange(query, range, neighbors, distances, x, qi);
if (searchInRight(x, range))
this.right.searchRange(query, range, neighbors, distances, x, qi);
}
@Override
public TreeNode clone()
{
return new VPNode(this);
}
@Override
public IndexNode getParrent()
{
return parent;
}
@Override
public double maxNodeDistance(IndexNode other)
{
if(other instanceof jsat.linear.vectorcollection.VPTree.VPNode)
{
jsat.linear.vectorcollection.VPTree.VPNode o = (jsat.linear.vectorcollection.VPTree.VPNode) other;
Vec ov = o.getVec(o.p);
List<Double> qi = dm.getQueryInfo(ov);
return dm.dist(this.p, ov, qi, allVecs, distCache) + this.right_high + o.right_high;
}
else
{
return Double.POSITIVE_INFINITY;
}
}
@Override
public double minNodeDistance(IndexNode other)
{
if(other instanceof jsat.linear.vectorcollection.VPTree.VPNode)
{
jsat.linear.vectorcollection.VPTree.VPNode o = (jsat.linear.vectorcollection.VPTree.VPNode) other;
Vec ov = o.getVec(o.p);
List<Double> qi = dm.getQueryInfo(ov);
return dm.dist(this.p, ov, qi, allVecs, distCache) - this.right_high - o.right_high;
}
else
{
return 0;
}
}
@Override
public double[] minMaxDistance(IndexNode other)
{
if(other instanceof jsat.linear.vectorcollection.VPTree.VPNode)
{
jsat.linear.vectorcollection.VPTree.VPNode o = (jsat.linear.vectorcollection.VPTree.VPNode) other;
Vec ov = o.getVec(o.p);
List<Double> qi = dm.getQueryInfo(ov);
double d = dm.dist(this.p, ov, qi, allVecs, distCache);
return new double[]
{
d - this.right_high - o.right_high,
d + this.right_high + o.right_high
};
}
else
{
return new double[]{0, Double.POSITIVE_INFINITY};
}
}
@Override
public double minNodeDistance(int other)
{
return 0;
}
@Override
public double getParentDistance()
{
return parent_dist;
}
@Override
public double furthestPointDistance()
{
            return 0;//We store exactly one point (the vantage point) at this node, so the distance is 0
}
@Override
public double furthestDescendantDistance()
{
return right_high;
}
@Override
public int numChildren()
{
return 2;
}
@Override
public IndexNode getChild(int indx)
{
switch(indx)
{
case 0:
return left;
case 1:
return right;
default:
throw new IndexOutOfBoundsException();
}
}
@Override
public Vec getVec(int indx)
{
return get(indx);
}
@Override
public int numPoints()
{
return 1;
}
@Override
public int getPoint(int indx)
{
if(indx == 0)
return p;
else
throw new IndexOutOfBoundsException("VPNode has only one point, can't access index " + indx);
}
@Override
public int size()
{
            return 1 + left.size() + right.size();//+1 counts the vantage point stored at this node
}
@Override
public boolean allPointsInLeaves()
{
return false;
}
}
private class VPLeaf extends TreeNode
{
/**
* The index in {@link #allVecs} for each data point stored in this Leaf node
*/
IntList points;
/**
* The distance of each point in this leaf to the parent node we came from.
*/
DoubleList bounds;
public VPLeaf(List<Pair<Double, Integer>> points)
{
this.points = new IntList(points.size());
this.bounds = new DoubleList(points.size());
for(int i = 0; i < points.size(); i++)
{
this.points.add(points.get(i).getSecondItem());
this.bounds.add(points.get(i).getFirstItem());
}
}
public VPLeaf(VPLeaf toCopy)
{
this.bounds = new DoubleList(toCopy.bounds);
this.points = new IntList(toCopy.points);
}
@Override
public void insert(int x_indx, double dist_to_parent)
{
this.points.add(x_indx);
this.bounds.add(dist_to_parent);
}
@Override
public void searchKNN(Vec query, int k, BoundedSortedList<IndexDistPair> list, double x, List<Double> qi)
{
double dist = -1;
            //If this leaf is the ONLY node, x is passed as 0.0 and the bound checks below trivially pass against Double.MAX_VALUE
double tau = list.isEmpty() ? Double.MAX_VALUE : list.get(list.size()-1).getDist();
for (int i = 0; i < points.size(); i++)
{
int point_i = points.getI(i);
double bound_i = bounds.getD(i);
if (list.size() < k)
{
list.add(new IndexDistPair(point_i, dm.dist(point_i, query, qi, allVecs, distCache)));
tau = list.get(list.size() - 1).getDist();
}
                else if (bound_i - tau <= x && x <= bound_i + tau)//Bound check against the distance to our parent node, provided by x
if ((dist = dm.dist(point_i, query, qi, allVecs, distCache)) < tau)
{
list.add(new IndexDistPair(point_i, dist));
tau = list.get(list.size() - 1).getDist();
}
}
}
@Override
public void searchRange(Vec query, double range, List<Integer> neighbors, List<Double> distances, double x, List<Double> qi)
{
double dist = Double.MAX_VALUE;
for (int i = 0; i < points.size(); i++)
{
int point_i = points.getI(i);
double bound_i = bounds.getD(i);
                if (bound_i - range <= x && x <= bound_i + range)//Bound check against the distance to our parent node, provided by x
if ((dist = dm.dist(point_i, query, qi, allVecs, distCache)) < range)
{
neighbors.add(point_i);
distances.add(dist);
}
}
}
@Override
public void searchKNN_range(Vec query, int k, double range, BoundedSortedList<IndexDistPair> list, double x, List<Double> qi)
{
double dist = -1;
            //tau caps the search at the given range until k neighbors are found, then shrinks to the current k'th distance
double tau = list.size() < k ? range : min(range, list.get(list.size()-1).getDist());
for (int i = 0; i < points.size(); i++)
{
int point_i = points.getI(i);
double bound_i = bounds.getD(i);
                if (bound_i - tau <= x && x <= bound_i + tau)//Bound check against the distance to our parent node, provided by x
if ((dist = dm.dist(point_i, query, qi, allVecs, distCache)) < tau)
{
list.add(new IndexDistPair(point_i, dist));
tau = min(range, list.get(list.size() - 1).getDist());
}
}
}
@Override
public boolean isLeaf()
{
return true;
}
@Override
public TreeNode clone()
{
return new VPLeaf(this);
}
@Override
public IndexNode getParrent()
{
return parent;
}
@Override
public double maxNodeDistance(IndexNode other)
{
//Leaf node, return a value that makes caller go brute-force
return Double.POSITIVE_INFINITY;
}
@Override
public double minNodeDistance(IndexNode other)
{
//Leaf node, return a value that makes caller go brute-force
return 0.0;
}
@Override
public double minNodeDistance(int other)
{
//Leaf node, return a value that makes caller go brute-force
return 0.0;
}
@Override
public double getParentDistance()
{
return bounds.stream().mapToDouble(d->d).max().orElse(Double.POSITIVE_INFINITY);
}
@Override
public double furthestPointDistance()
{
return bounds.stream().mapToDouble(d->d).max().orElse(Double.POSITIVE_INFINITY);
}
@Override
public double furthestDescendantDistance()
{
return bounds.stream().mapToDouble(d->d).max().orElse(Double.POSITIVE_INFINITY);
}
@Override
public int numChildren()
{
return 0;
}
@Override
public IndexNode getChild(int indx)
{
throw new IndexOutOfBoundsException("Leaf nodes have no children");
}
@Override
public Vec getVec(int indx)
{
return get(indx);
}
@Override
public int numPoints()
{
return points.size();
}
@Override
public int getPoint(int indx)
{
return points.getI(indx);
}
@Override
public int size()
{
return points.size();
}
}
}
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/VPTreeMV.java |
package jsat.linear.vectorcollection;
import java.util.List;
import java.util.Random;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.math.OnLineStatistics;
import jsat.utils.Pair;
/**
* The VPTreeMV is an extension of the VPTree, the MV meaning "of Minimum Variance". This extension
* alters the splitting method of nodes, giving up the O(log n) bound on query time. This is done
* to reduce the variance in the distance to the parent node of each split, which can result in
* lopsided distributions of values for each split. At the same time, this lopsidedness may better
* reflect the locality of points in the data set. This can result in a decrease in query time
* for some data sets, with minimal impact on construction time.
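 * <br><br>
 * A minimal usage sketch (the data set and query shown are illustrative
 * placeholders, not part of the original documentation):
 * <pre>{@code
 * List<Vec> data = ...; //vectors to index
 * VPTreeMV<Vec> tree = new VPTreeMV<>(data, new EuclideanDistance());
 * //5 nearest neighbors of the first point, paired with their distances
 * List<? extends VecPaired<Vec, Double>> nn = tree.search(data.get(0), 5);
 * }</pre>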
*
* @author Edward Raff
*/
public class VPTreeMV<V extends Vec> extends VPTree<V>
{
private static final long serialVersionUID = 6668184445206226077L;
public VPTreeMV(List<V> list, DistanceMetric dm, VPSelection vpSelection, Random rand, int sampleSize, int searchIterations, boolean parallel)
{
super(list, dm, vpSelection, rand, sampleSize, searchIterations, parallel);
}
public VPTreeMV(List<V> list, DistanceMetric dm, VPSelection vpSelection, Random rand, int sampleSize, int searchIterations)
{
super(list, dm, vpSelection, rand, sampleSize, searchIterations);
}
public VPTreeMV(List<V> list, DistanceMetric dm, VPSelection vpSelection)
{
super(list, dm, vpSelection);
}
public VPTreeMV(List<V> list, DistanceMetric dm)
{
super(list, dm);
}
public VPTreeMV(List<V> list, DistanceMetric dm, boolean parallel)
{
super(list, dm, parallel);
}
public VPTreeMV(DistanceMetric dm)
{
super(dm);
}
public VPTreeMV(DistanceMetric dm, VPSelection sampling)
{
super(dm, sampling);
}
public VPTreeMV()
{
super();
}
public VPTreeMV(VPTreeMV toClone)
{
super(toClone);
}
@Override
protected int splitListIndex(List<Pair<Double, Integer>> S)
{
int splitIndex = S.size()/2;
int maxLeafSize = getMaxLeafSize();
if(S.size() >= maxLeafSize*4)
{
            //Adjust to avoid degenerate cases that create a long string of tiny splits. The most imbalanced split allowed is 1:20
int minSplitSize = Math.max(maxLeafSize, S.size()/20);
OnLineStatistics rightV = new OnLineStatistics();
OnLineStatistics leftV = new OnLineStatistics();
for(int i = 0; i < minSplitSize; i++)
leftV.add(S.get(i).getFirstItem());
for(int i = minSplitSize; i < S.size(); i++)
rightV.add(S.get(i).getFirstItem());
splitIndex = minSplitSize;
double bestVar = leftV.getVarance()*minSplitSize+rightV.getVarance()*(S.size()-minSplitSize);
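            //Scan the remaining candidate split points in the allowed middle region,
            //moving one element at a time from the right side to the left; the
            //running statistics make each variance update O(1)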
for(int i = minSplitSize+1; i < S.size()-minSplitSize; i++)
{
double tmp = S.get(i).getFirstItem();
leftV.add(tmp);
rightV.remove(tmp, 1.0);
double testVar = leftV.getVarance()*i + rightV.getVarance()*(S.size()-i);
if(testVar < bestVar)
{
splitIndex = i;
bestVar = testVar;
}
}
}
return splitIndex;
}
@Override
public VPTreeMV<V> clone()
{
return new VPTreeMV<>(this);
}
}
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/VectorArray.java |
package jsat.linear.vectorcollection;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.utils.BoundedSortedList;
import jsat.utils.DoubleList;
import jsat.utils.IndexTable;
/**
* This is the naive implementation of a Vector collection. Construction time is
* O(n) only to clone the n elements, and all queries are O(n)
* <br><br>
* Removing elements from the vector array will result in the destruction of any
* {@link DistanceMetric#getAccelerationCache(java.util.List) acceleration cache}
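 * <br><br>
 * A minimal usage sketch (the points and query are illustrative placeholders):
 * <pre>{@code
 * VectorArray<Vec> va = new VectorArray<>(new EuclideanDistance());
 * va.add(somePoint); //insertion is O(1), queries remain O(n)
 * IntList neighbors = new IntList();
 * DoubleList distances = new DoubleList();
 * va.search(query, 3, neighbors, distances); //3-NN by linear scan
 * }</pre>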
*
* @author Edward Raff
*/
public class VectorArray<V extends Vec> extends ArrayList<V> implements IncrementalCollection<V>
{
private static final long serialVersionUID = 5365949686370986234L;
private DistanceMetric distanceMetric;
private List<Double> distCache;
public VectorArray()
{
this(new EuclideanDistance(), 20);
}
public VectorArray(DistanceMetric distanceMetric, int initialCapacity)
{
super(initialCapacity);
this.distanceMetric = distanceMetric;
if(distanceMetric.supportsAcceleration())
distCache = new DoubleList(initialCapacity);
}
public VectorArray(DistanceMetric distanceMetric, Collection<? extends V> c)
{
super(c);
this.distanceMetric = distanceMetric;
if(distanceMetric.supportsAcceleration())
distCache = distanceMetric.getAccelerationCache(this);
}
public VectorArray(DistanceMetric distanceMetric)
{
super();
this.distanceMetric = distanceMetric;
if(distanceMetric.supportsAcceleration())
distCache = new DoubleList();
}
@Override
public DistanceMetric getDistanceMetric()
{
return distanceMetric;
}
@Override
public void setDistanceMetric(DistanceMetric distanceMetric)
{
if(this.distanceMetric == distanceMetric)
            return;//avoid recomputing needlessly
this.distanceMetric = distanceMetric;
if(distanceMetric.supportsAcceleration())
this.distCache = distanceMetric.getAccelerationCache(this);
else
this.distCache = null;
}
@Override
public void insert(V x)
{
add(x);
}
@Override
public boolean add(V e)
{
boolean toRet = super.add(e);
if(distCache != null)
this.distCache.addAll(distanceMetric.getQueryInfo(e));
return toRet;
}
@Override
public List<Double> getAccelerationCache()
{
return distCache;
}
@Override
public boolean addAll(Collection<? extends V> c)
{
boolean toRet = super.addAll(c);
if(this.distCache != null)
for(V v : c)
this.distCache.addAll(this.distanceMetric.getQueryInfo(v));
return toRet;
}
@Override
public V remove(int index)
{
distCache = null;
        return super.remove(index);
}
@Override
public void clear()
{
super.clear();
this.distCache = distanceMetric.getAccelerationCache(this);
}
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
neighbors.clear();
distances.clear();
List<Double> qi = distanceMetric.getQueryInfo(query);
for(int i = 0; i < size(); i++)
{
double dist = distanceMetric.dist(i, query, qi, this, distCache);
if(dist <= range)
{
neighbors.add(i);
distances.add(dist);
}
}
IndexTable it = new IndexTable(distances);
it.apply(neighbors);
it.apply(distances);
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
neighbors.clear();
distances.clear();
BoundedSortedList<IndexDistPair> knns = new BoundedSortedList<>(numNeighbors);
List<Double> qi = distanceMetric.getQueryInfo(query);
for(int i = 0; i < size(); i++)
{
double distance = distanceMetric.dist(i, query, qi, this, distCache);
knns.add(new IndexDistPair(i, distance));
}
for(int i = 0; i < knns.size(); i++)
{
neighbors.add(knns.get(i).getIndex());
distances.add(knns.get(i).getDist());
}
}
@Override
public VectorArray<V> clone()
{
VectorArray<V> clone = new VectorArray<>(distanceMetric, this);
return clone;
}
@Override
public void build(boolean parallel, List<V> collection, DistanceMetric dm)
{
clear();
setDistanceMetric(dm);
addAll(collection);
}
}
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/VectorCollection.java |
package jsat.linear.vectorcollection;
import java.io.Serializable;
import static java.lang.Math.max;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.utils.DoubleList;
import jsat.utils.FibHeap;
import jsat.utils.IntList;
import jsat.utils.Tuple3;
import jsat.utils.concurrent.ParallelUtils;
/**
* A Vector Collection is a collection of vectors that is meant to be used to
* obtain a subset of the collection via a query vector. A query can be for
* the nearest neighbors, or for all vectors within a given range.
* <br>
* Different vector collections have different performance properties for both training and execution time.
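 * <br><br>
 * A minimal usage sketch of the interface (the concrete collection, points,
 * and query are illustrative):
 * <pre>{@code
 * VectorCollection<Vec> vc = new VectorArray<>(new EuclideanDistance());
 * vc.build(points); //index the data set
 * IntList neighbors = new IntList();
 * DoubleList distances = new DoubleList();
 * vc.search(query, 10, neighbors, distances); //10-NN query
 * }</pre>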
*
* @author Edward Raff
* @param <V>
*/
public interface VectorCollection<V extends Vec> extends Cloneable, Serializable
{
/**
* Builds this metric index from the given collection of points using
* whatever distance metric is currently set for the metric index.
*
* @param collection the list of vectors to put into the index
*/
default public void build(List<V> collection)
{
build(false, collection);
}
/**
* Builds this metric index from the given collection of points using the
* given distance metric.
*
* @param collection the list of vectors to put into the index
* @param dm the distance metric to build the index using.
*/
default public void build(List<V> collection, DistanceMetric dm)
{
build(false, collection, dm);
}
/**
* Builds this metric index from the given collection of points using
* whatever distance metric is currently set for the metric index.
*
* @param parallel {@code true} if the index should be built in parallel, or
* {@code false} if it should be done in a single thread.
* @param collection the list of vectors to put into the index
*/
default public void build(boolean parallel, List<V> collection)
{
build(parallel, collection, getDistanceMetric());
}
/**
* Builds this metric index from the given collection of points using the
* given distance metric.
*
* @param parallel {@code true} if the index should be built in parallel, or
* {@code false} if it should be done in a single thread.
* @param collection the list of vectors to put into the index
* @param dm the distance metric to build the index using.
*/
public void build(boolean parallel, List<V> collection, DistanceMetric dm);
/**
* Sets the distance metric used for this collection.
* @param dm the distance metric to use
*/
public void setDistanceMetric(DistanceMetric dm);
/**
*
* @return the distance metric to use
*/
public DistanceMetric getDistanceMetric();
/**
* Searches the space for all vectors that are within a given range of the query vector.
* @param query the vector we want to find others near
* @param range the search range around our query
* @return the list of all vectors within the range of our query. The paired value contains the distance to the query vector.
* @deprecated This API is from the original JSAT interface. It will be removed in the future.
*/
default public List<? extends VecPaired<V, Double>> search(Vec query, double range)
{
IntList neighbors = new IntList();
DoubleList distances = new DoubleList();
search(query, range, neighbors, distances);
List<VecPaired<V, Double>> toRet = new ArrayList<>();
for(int i = 0; i < neighbors.size(); i++)
toRet.add(new VecPaired<>(get(neighbors.getI(i)), distances.getD(i)));
return toRet;
}
/**
* Searches the space for the k neighbors that are closest to the given query vector
* @param query the vector we want to find neighbors of
* @param num_neighbors the maximum number of neighbors to return
     * @return the list of the k nearest neighbors, in sorted order from closest to farthest. The paired value contains the distance to the query vector
* @deprecated This API is from the original JSAT interface. It will be removed in the future.
*/
default public List<? extends VecPaired<V, Double>> search(Vec query, int num_neighbors)
{
IntList neighbors = new IntList();
DoubleList distances = new DoubleList();
search(query, num_neighbors, neighbors, distances);
List<VecPaired<V, Double>> toRet = new ArrayList<>();
for(int i = 0; i < neighbors.size(); i++)
toRet.add(new VecPaired<>(get(neighbors.getI(i)), distances.getD(i)));
return toRet;
}
/**
* Performs a range search of the current collection. The index and distance
* of each found neighbor will be placed into the given Lists.
*
* @param query the point to search for the neighbors within a given radius.
* @param range the radius to search for all the neighbors with a distance
* ≤ range.
* @param neighbors the list to store the index of the neighbors in. Will be
* sorted by distance to the query, and paired with the values in
* <tt>distances</tt>.
* @param distances the list to store the distance of the neighbors to the
* query in. Will be sorted, and paired with the values in
* <tt>neighbors</tt>.
*/
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances);
/**
* Performs k-Nearest Neighbor search of the current collection. The index
* and distance of each found neighbor will be placed into the given Lists.
*
* @param query the point to search for the k-nearest neighbors of
* @param numNeighbors the number of neighbors <i>k</i> to search for.
* @param neighbors the list to store the index of the neighbors in. Will be
* sorted by distance to the query, and paired with the values in
* <tt>distances</tt>.
* @param distances the list to store the distance of the neighbors to the
* query in. Will be sorted, and paired with the values in
* <tt>neighbors</tt>.
*/
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances);
/**
     * Performs k-Nearest Neighbor search of the current collection. The index
* and distance of each found neighbor will be placed into the given Lists,
* but only if they are within a radius of <tt>range</tt> from the query. As
* such, this method may not return <tt>k</tt> neighbors.
*
* @param query the point to search for the k-nearest neighbors of
* @param numNeighbors the number of neighbors <i>k</i> to search for.
* @param range the radius to search for all the neighbors with a
* distance ≤ range.
* @param neighbors the list to store the index of the neighbors in. Will
* be sorted by distance to the query, and paired with
* the values in
* <tt>distances</tt>.
* @param distances the list to store the distance of the neighbors to
* the query in. Will be sorted, and paired with the
* values in
* <tt>neighbors</tt>.
*/
default public void search(Vec query, int numNeighbors, double range, List<Integer> neighbors, List<Double> distances)
{
search(query, numNeighbors, neighbors, distances);
for(int i = 0; i < neighbors.size(); i++)
{
if(distances.get(i) > range)
{
                //flush out the tail and back out!
                neighbors.subList(i, neighbors.size()).clear();
                distances.subList(i, distances.size()).clear();
break;
}
}
}
/**
* Accesses a vector from this collection via index.
* @param indx the index in [0, {@link #size() }) of the vector to access
* @return the vector from the collection
*/
public V get(int indx);
public List<Double> getAccelerationCache();
/**
* Returns the number of vectors stored in the collection
* @return the size of the collection
*/
public int size();
default public void search(List<V> Q, double r_min, double r_max, List<List<Integer>> neighbors, List<List<Double>> distances , boolean parallel)
{
VectorCollection<V> vc = new VectorArray<>(getDistanceMetric(), Q);
search(vc, r_min, r_max, neighbors, distances, parallel);
}
default public void search(VectorCollection<V> Q, double r_min, double r_max, List<List<Integer>> neighbors, List<List<Double>> distances , boolean parallel)
{
neighbors.clear();
distances.clear();
for(int i = 0; i < Q.size(); i++)
{
neighbors.add(new ArrayList<>());
distances.add(new ArrayList<>());
}
ParallelUtils.range(Q.size(), parallel).forEach(i->
{
//this gets everything up to max
this.search(Q.get(i), r_max, neighbors.get(i), distances.get(i));
            //now let's remove the things below min
int indx = Collections.binarySearch(distances.get(i), r_min);
if(indx < 0)
indx = -indx-1;
neighbors.get(i).subList(0, indx).clear();
distances.get(i).subList(0, indx).clear();
});
}
default public void search(List<V> Q, int numNeighbors, List<List<Integer>> neighbors, List<List<Double>> distances, boolean parallel)
{
VectorCollection<V> vc = new VectorArray<>(getDistanceMetric(), Q);
search(vc, numNeighbors, neighbors, distances, parallel);
}
default public void search(VectorCollection<V> Q, int numNeighbors, List<List<Integer>> neighbors, List<List<Double>> distances, boolean parallel)
{
neighbors.clear();
distances.clear();
for(int i = 0; i < Q.size(); i++)
{
neighbors.add(new ArrayList<>());
distances.add(new ArrayList<>());
}
ParallelUtils.range(Q.size(), parallel).forEach(i->
{
            //k-NN search for this query point
this.search(Q.get(i), numNeighbors, neighbors.get(i), distances.get(i));
});
}
public VectorCollection<V> clone();
public default List<Vec> getVecs()
{
return IntStream.range(0, size())
.mapToObj(this::get)
.collect(Collectors.toList());
}
}
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/VectorCollectionUtils.java |
package jsat.linear.vectorcollection;
import java.util.*;
import java.util.concurrent.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.math.OnLineStatistics;
import jsat.utils.ListUtils;
import static jsat.utils.SystemInfo.LogicalCores;
import jsat.utils.concurrent.ParallelUtils;
/**
* A collection of common utility methods to perform on a {@link VectorCollection}
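 * <br><br>
 * For example, a sketch of summarizing the distance to each query point's 3rd
 * nearest neighbor (the collection and query list are illustrative):
 * <pre>{@code
 * OnLineStatistics stats = VectorCollectionUtils.getKthNeighborStats(vc, queries, 3);
 * double avg3rdNNDist = stats.getMean();
 * }</pre>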
*
* @author Edward Raff
*/
public class VectorCollectionUtils
{
/**
* Searches the given collection for the <tt>k</tt> nearest neighbors for every data point in the given search list.
* @param <V0> the vector type in the collection
* @param <V1> the type of vector in the search collection
* @param collection the collection to search from
* @param search the vectors to search for
* @param k the number of nearest neighbors
* @return The list of lists for all nearest neighbors
*/
public static <V0 extends Vec, V1 extends Vec> List<List<? extends VecPaired<V0, Double>>> allNearestNeighbors(VectorCollection<V0> collection, List<V1> search, int k)
{
List<List<? extends VecPaired<V0, Double>>> results = new ArrayList<>(search.size());
for(Vec v : search)
results.add(collection.search(v, k));
return results;
}
/**
* Searches the given collection for the <tt>k</tt> nearest neighbors for every data point in the given search list.
* @param <V0> the vector type in the collection
* @param <V1> the type of vector in the search array
* @param collection the collection to search from
* @param search the vectors to search for
* @param k the number of nearest neighbors
* @return The list of lists for all nearest neighbors
*/
public static <V0 extends Vec, V1 extends Vec> List<List<? extends VecPaired<V0, Double>>> allNearestNeighbors(VectorCollection<V0> collection, V1[] search, int k)
{
return allNearestNeighbors(collection, Arrays.asList(search), k);
}
/**
* Searches the given collection for the <tt>k</tt> nearest neighbors for every data point in the given search list.
* @param <V0> the vector type in the collection
* @param <V1> the type of vector in the search collection
* @param collection the collection to search from
* @param search the vectors to search for
* @param k the number of nearest neighbors
* @param threadpool the source of threads to perform the computation in parallel
* @return The list of lists for all nearest neighbors
* @deprecated This will be deleted soon
*/
public static <V0 extends Vec, V1 extends Vec> List<List<? extends VecPaired<V0, Double>>> allNearestNeighbors(final VectorCollection<V0> collection, List<V1> search, final int k, ExecutorService threadpool)
{
List<List<? extends VecPaired<V0, Double>>> results = new ArrayList<>(search.size());
List<Future<List<List<? extends VecPaired<V0, Double>>>>> subResults = new ArrayList<>(LogicalCores);
for(final List<V1> subSearch : ListUtils.splitList(search, LogicalCores))
{
subResults.add(threadpool.submit(() ->
{
List<List<? extends VecPaired<V0, Double>>> subResult = new ArrayList<>(subSearch.size());
for(Vec v : subSearch )
subResult.add(collection.search(v, k));
return subResult;
}));
}
try
{
for (List<List<? extends VecPaired<V0, Double>>> subResult : ListUtils.collectFutures(subResults))
results.addAll(subResult);
}
catch (ExecutionException | InterruptedException ex)
{
Logger.getLogger(VectorCollectionUtils.class.getName()).log(Level.SEVERE, null, ex);
}
return results;
}
/**
* Searches the given collection for the <tt>k</tt> nearest neighbors for every data point in the given search list.
* @param <V0> the vector type in the collection
* @param <V1> the type of vector in the search collection
* @param collection the collection to search from
* @param search the vectors to search for
* @param k the number of nearest neighbors
* @param parallel {@code true} if multiple threads should be used to
* perform clustering. {@code false} if it should be done in a single
* threaded manner.
* @return The list of lists for all nearest neighbors
*/
public static <V0 extends Vec, V1 extends Vec> List<List<? extends VecPaired<V0, Double>>> allNearestNeighbors(final VectorCollection<V0> collection, List<V1> search, final int k, boolean parallel)
{
return ParallelUtils.streamP(search.stream(), parallel)
.map(v -> collection.search(v, k))
.collect(Collectors.toList());
}
/**
* Searches the given collection for all the neighbors within a distance of <tt>radius</tt> for every data point in the given search list.
* @param <V0> the vector type in the collection
* @param <V1> the type of vector in the search collection
* @param collection the collection to search from
* @param search the vectors to search for
* @param radius the distance to search for neighbors
* @param threadpool the source of threads to perform the computation in parallel
* @return The list of lists for all nearest neighbors
*/
public static <V0 extends Vec, V1 extends Vec> List<List<? extends VecPaired<V0, Double>>> allEpsNeighbors(final VectorCollection<V0> collection, List<V1> search, final double radius, ExecutorService threadpool)
{
List<List<? extends VecPaired<V0, Double>>> results = new ArrayList<>(search.size());
List<Future<List<List<? extends VecPaired<V0, Double>>>>> subResults = new ArrayList<>(LogicalCores);
for(final List<V1> subSearch : ListUtils.splitList(search, LogicalCores))
{
subResults.add(threadpool.submit(() ->
{
List<List<? extends VecPaired<V0, Double>>> subResult = new ArrayList<>(subSearch.size());
for(Vec v : subSearch )
subResult.add(collection.search(v, radius));
return subResult;
}));
}
try
{
for (List<List<? extends VecPaired<V0, Double>>> subResult : ListUtils.collectFutures(subResults))
results.addAll(subResult);
}
catch (ExecutionException | InterruptedException ex)
{
Logger.getLogger(VectorCollectionUtils.class.getName()).log(Level.SEVERE, null, ex);
}
return results;
}
/**
* Searches the given collection for all the neighbors within a distance of <tt>radius</tt> for every data point in the given search list.
* @param <V0> the vector type in the collection
* @param <V1> the type of vector in the search collection
* @param collection the collection to search from
* @param search the vectors to search for
* @param radius the distance to search for neighbors
* @param parallel {@code true} if multiple threads should be used to
* perform clustering. {@code false} if it should be done in a single
* threaded manner.
* @return The list of lists for all nearest neighbors
*/
public static <V0 extends Vec, V1 extends Vec> List<List<? extends VecPaired<V0, Double>>> allEpsNeighbors(final VectorCollection<V0> collection, List<V1> search, final double radius, boolean parallel)
{
return ParallelUtils.streamP(search.stream(), parallel)
.map(v -> collection.search(v, radius))
.collect(Collectors.toList());
}
/**
* Searches the given collection for the <tt>k</tt> nearest neighbors for every data point in the given search list.
* @param <V0> the vector type in the collection
* @param <V1> the type of vector in the search collection
* @param collection the collection to search from
* @param search the vectors to search for
* @param k the number of nearest neighbors
* @param threadpool the source of threads to perform the computation in parallel
* @return The list of lists for all nearest neighbors
*/
public static <V0 extends Vec, V1 extends Vec> List<List<? extends VecPaired<V0, Double>>> allNearestNeighbors(final VectorCollection<V0> collection, V1[] search, final int k, ExecutorService threadpool)
{
return allNearestNeighbors(collection, Arrays.asList(search), k, threadpool);
}
/**
* Computes statistics about the distance of the k'th nearest neighbor for each data point in the <tt>search</tt> list.
*
* @param <V0> the type of vector in the collection
* @param <V1> the type of vector in the search collection
* @param collection the collection of vectors to query from
* @param search the list of vectors to search for
* @param k the nearest neighbor to use
* @return the statistics for the distance of the k'th nearest neighbor from the query point
*/
public static <V0 extends Vec, V1 extends Vec> OnLineStatistics getKthNeighborStats(VectorCollection<V0> collection, List<V1> search, int k)
{
OnLineStatistics stats = new OnLineStatistics();
for(Vec v : search)
stats.add(collection.search(v, k).get(k-1).getPair());
return stats;
}
/**
* Computes statistics about the distance of the k'th nearest neighbor for each data point in the <tt>search</tt> list.
*
* @param <V0> the type of vector in the collection
* @param <V1> the type of vector in the search array
* @param collection the collection of vectors to query from
* @param search the array of vectors to search for
* @param k the nearest neighbor to use
* @return the statistics for the distance of the k'th nearest neighbor from the query point
*/
public static <V0 extends Vec, V1 extends Vec> OnLineStatistics getKthNeighborStats(VectorCollection<V0> collection, V1[] search, int k)
{
return getKthNeighborStats(collection, Arrays.asList(search), k);
}
/**
* Computes statistics about the distance of the k'th nearest neighbor for each data point in the <tt>search</tt> list.
*
* @param <V0> the type of vector in the collection
* @param <V1> the type of vector in the search collection
* @param collection the collection of vectors to query from
* @param search the list of vectors to search for
* @param k the nearest neighbor to use
* @param threadpool the source of threads to perform the computation in parallel
* @return the statistics for the distance of the k'th nearest neighbor from the query point
*/
public static <V0 extends Vec, V1 extends Vec> OnLineStatistics getKthNeighborStats(final VectorCollection<V0> collection, List<V1> search, final int k, ExecutorService threadpool)
{
        List<Future<OnLineStatistics>> futureStats = new ArrayList<>(LogicalCores);
for(final List<V1> subSearch : ListUtils.splitList(search, LogicalCores))
{
            futureStats.add(threadpool.submit(() ->
            {
                OnLineStatistics stats = new OnLineStatistics();
                for(Vec v: subSearch)
                    stats.add(collection.search(v, k).get(k-1).getPair());
                return stats;
            }));
}
OnLineStatistics stats = new OnLineStatistics();
try
{
for (OnLineStatistics subResult : ListUtils.collectFutures(futureStats))
stats = OnLineStatistics.add(stats, subResult);
}
        catch (ExecutionException | InterruptedException ex)
        {
            Logger.getLogger(VectorCollectionUtils.class.getName()).log(Level.SEVERE, null, ex);
        }
return stats;
}
/**
* Computes statistics about the distance of the k'th nearest neighbor for each data point in the <tt>search</tt> list.
*
* @param <V0> the type of vector in the collection
* @param <V1> the type of vector in the search array
* @param collection the collection of vectors to query from
* @param search the array of vectors to search for
* @param k the nearest neighbor to use
* @param threadpool the source of threads to perform the computation in parallel
* @return the statistics for the distance of the k'th nearest neighbor from the query point
*/
public static <V0 extends Vec, V1 extends Vec> OnLineStatistics getKthNeighborStats(final VectorCollection<V0> collection, V1[] search, final int k, ExecutorService threadpool)
{
return getKthNeighborStats(collection, Arrays.asList(search), k, threadpool);
}
}
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/lsh/E2LSH.java |
package jsat.linear.vectorcollection.lsh;
import java.util.List;
import jsat.distributions.Normal;
import jsat.linear.Vec;
import static java.lang.Math.*;
import java.util.*;
import jsat.linear.DenseVector;
import jsat.linear.VecPaired;
import jsat.linear.VecPairedComparable;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.distancemetrics.ManhattanDistance;
import jsat.utils.IntList;
import jsat.utils.IntSet;
import jsat.utils.random.RandomUtil;
/**
* This is an implementation of Locality Sensitive Hashing for the
* {@link ManhattanDistance L<sub>1</sub>} and
* {@link EuclideanDistance L<sub>2</sub> } distance metrics. This is
* essentially a vector collection that can only perform a radius search for a
* pre-defined radius. In addition, the results are only approximate - not all
* of the correct points may be returned, and it is possible no points will be
* returned when the truth is that some data points do exist.
* <br><br>
* Searching is done using the {@link #searchR(jsat.linear.Vec, boolean) }
* methods. While the set of points returned is approximate, the distance values
* are exact. This is because no approximate distance is available, so the
* distances must be computed to remove violators.
* <br><br>
* LSH may be useful if any of the following apply to your problem<br>
* <ul>
 * <li>Only need to do radius searches of a small number of fixed size
* increments</li>
* <li>You need only the first few nearest neighbors, and can compute a
* threshold for the NN</li>
* <li>Approximate neighbor results do not heavily impact the results of your
* algorithm</li>
* <li>You want to find near-duplicates in a data set</li>
* </ul>
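 * <br><br>
 * A minimal usage sketch (the parameter values shown are illustrative, not
 * recommendations):
 * <pre>{@code
 * //radius 1.0, eps 0.5, w and k <= 0 to use the defaults, delta 0.1
 * E2LSH<Vec> lsh = new E2LSH<>(vecs, 1.0, 0.5, 0, 0, 0.1, new EuclideanDistance());
 * List<? extends VecPaired<Vec, Double>> near = lsh.searchR(query);
 * }</pre>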
* <br><br>
* This implementation is based heavily on the following, but is not an
* exact re-implementation.
* <br><br>
* See:<br>
* <ul>
 * <li>Datar, M., Immorlica, N., Indyk, P., & Mirrokni, V. S. (2004). <i>
* Locality-sensitive hashing scheme based on p-stable distributions</i>.
* Proceedings of the twentieth annual symposium on Computational geometry -
* SCG ’04 (pp. 253–262). New York, New York, USA: ACM Press.
* doi:10.1145/997817.997857</li>
* <li> Andoni, Alex (2005).
* <a href="http://www.mit.edu/~andoni/LSH/manual.pdf">E2LSH Manual 0.1</a></li>
* </ul>
*
* @author Edward Raff
*/
public class E2LSH<V extends Vec>
{
private List<V> vecs;
private DistanceMetric dm;
private double radius;
private double eps;
private double p1;
private double p2;
private int w;
private double c;
/**
* 1-delta is the probability of correctly selecting the neighbors within
* radius R
*/
private double delta = Double.NaN;
private int L;
private int k;
private List<Double> distCache;
private Vec[][] h;
private double[][] b;
private List<Map<Integer, List<Integer>>> tables;
/**
* Creates a new LSH scheme for a given distance metric
     * @param vecs the set of vectors to place into the LSH
     * @param radius the search radius for vectors
     * @param eps the approximation error, where vectors as far as R(1+eps) are
* likely to be returned. Must be positive.
* @param w the projection radius. If given a value <= 0, a default value
* of 4 will be used.
* @param k the number of hash functions to conjoin into the final hash per
* vector. If a value <= 0 is given, a default value will be computed.
* @param delta (1-delta) will be the desired minimum probability of
* correctly selecting the correct nearest neighbor if there is only 1-NN
* within a distance of {@code radius}. It will be used to determine some
* number {@link #getL() } hash tables to reach the desired probability.
* 0.10 is a good value.
* @param dm the distance metric to use, must be {@link EuclideanDistance}
* or {@link ManhattanDistance}.
* @param distCache the distance acceleration cache to use, if {@code null},
     * and it is supported, one will not be built. This is provided to avoid
* redundant calculation when initializing multiple LSH tables using the
* same data set.
*/
public E2LSH(List<V> vecs, double radius, double eps, int w, int k, double delta, DistanceMetric dm, List<Double> distCache)
{
this.vecs = vecs;
setRadius(radius);
this.delta = delta;
setEps(eps);
if(w <= 0)
this.w = 4;
else
this.w = w;
setDistanceMetric(dm);
this.distCache = distCache;
if(k <= 0)
this.k = (int) ceil(log(vecs.size())/log(1/p2));
else
this.k = k;
if(delta <= 0 || delta >= 1)
throw new IllegalArgumentException("dleta must be in range (0,1)");
L = (int)ceil(log(1/delta)/-log(1-pow(p1, this.k)));
Random rand = RandomUtil.getRandom();
createTablesAndHashes(rand);
}
/**
* Creates a new LSH scheme for a given distance metric
     * @param vecs the set of vectors to place into the LSH
     * @param radius the search radius for vectors
     * @param eps the approximation error, where vectors as far as R(1+eps) are
* likely to be returned. Must be positive.
* @param w the projection radius. If given a value <= 0, a default value
* of 4 will be used.
* @param k the number of hash functions to conjoin into the final hash per
* vector. If a value <= 0 is given, a default value will be computed.
* @param delta (1-delta) will be the desired minimum probability of
* correctly selecting the correct nearest neighbor if there is only 1-NN
* within a distance of {@code radius}. It will be used to determine some
* number {@link #getL() } hash tables to reach the desired probability.
* 0.10 is a good value.
* @param dm the distance metric to use, must be {@link EuclideanDistance}
* or {@link ManhattanDistance}.
*/
public E2LSH(List<V> vecs, double radius, double eps, int w, int k, double delta, DistanceMetric dm)
{
this(vecs, radius, eps, w, k, delta, dm, dm.getAccelerationCache(vecs));
}
/**
* Performs a search for points within the set {@link #getRadius() radius}
* of the query point.
* @param q the query point to search near
* @return a list of vectors paired with their true distance from the query
* point that are within the desired radius of the query point
*/
public List<? extends VecPaired<Vec, Double>> searchR(Vec q)
{
return searchR(q, false);
}
/**
* Performs a search for points within the set {@link #getRadius() radius}
* of the query point.
* @param q the query point to search near
* @param approx whether or not to return results in the approximate query
* range
* @return a list of vectors paired with their true distance from the query
* point that are within the desired radius of the query point
*/
public List<? extends VecPaired<Vec, Double>> searchR(Vec q, boolean approx)
{
List<VecPairedComparable<Vec, Double>> toRet = new ArrayList<VecPairedComparable<Vec, Double>>();
Set<Integer> candidates = new IntSet();
for (int l = 0; l < L; l++)
{
int hash = hash(l, q);
            List<Integer> list = tables.get(l).get(hash);
            if(list == null)//no points fell into this bucket
                continue;
            for(int id : list)
                candidates.add(id);
}
final List<Double> q_qi = dm.getQueryInfo(q);
final double R = approx ? radius*getC() : radius;
for(int id : candidates)
{
double trueDist = dm.dist(id, q, q_qi, vecs, distCache);
if(trueDist <= R)
toRet.add(new VecPairedComparable<Vec, Double>(vecs.get(id), trueDist));
}
Collections.sort(toRet);
return toRet;
}
private int hash(int l, Vec v)
{
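        //Each of the k values is the p-stable hash floor((v . a_i / R + b_i) / w)
        //of Datar et al.; Arrays.hashCode combines them into one bucket key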
final int[] vals = new int[k];
for(int i = 0; i < k; i++)
vals[i] = (int) floor( ( (v.dot(h[l][i])/radius)+b[l][i])/w );
return Arrays.hashCode(vals);
}
private void setEps(double eps)
{
this.eps = eps;
this.c = eps+1;
}
/**
* Returns the multiplier used on the radius that controls the degree
* of approximation.
* @return the radius approximation multiplier > 1
*/
public double getC()
{
return c;
}
/**
* Returns the desired approximate radius for which to return results
* @return the radius in use
*/
public double getRadius()
{
return radius;
}
/**
* Returns how many separate hash tables have been created for this distance
* metric.
* @return the number of hash tables in use
*/
public int getL()
{
return L;
}
/**
     * Returns the exact value that should be used with the Euclidean
     * distance for the P2 probability.
     * @param w the projection distance
     * @param c the approximation constant > 1
* @return the exact P2 value to use
*/
private static double getP2L2(double w, double c)
{
return 1 - 2 * Normal.cdf(-w/c, 0, 1)-2/(sqrt(2*PI)*w/c)*(1-exp(-w*w/(2*c*c)));
}
/**
     * Returns the exact value that should be used with the Manhattan
     * distance for the P2 probability.
     * @param w the projection distance
     * @param c the approximation constant > 1
* @return the exact P2 value to use
*/
private static double getP2L1(double w, double c)
{
return 2*atan(w/c)/PI-log(1+pow(w/c, 2))/(PI*w/c);
}
/**
* Creates and initializes the tables of hash functions for {@link #h} and
* {@link #b}
* @param rand source of randomness
*/
private void createTablesAndHashes(Random rand)
{
int D = vecs.get(0).length();
h = new Vec[L][k];
b = new double[L][k];
for(int l = 0; l < L; l++)
for(int i = 0; i < k; i++)
{
DenseVector dv = new DenseVector(D);
for(int j = 0; j < D; j++)
dv.set(j, rand.nextGaussian());
h[l][i] = dv;
b[l][i] = rand.nextDouble()*w;
}
tables = new ArrayList<Map<Integer, List<Integer>>>(L);
for(int l = 0; l < L; l++)
{
tables.add(new HashMap<Integer, List<Integer>>());
for(int id = 0; id < vecs.size(); id++)
{
int hash = hash(l, vecs.get(id));
List<Integer> ints = tables.get(l).get(hash);
if(ints == null)
{
ints = new IntList(3);
tables.get(l).put(hash, ints);
}
ints.add(id);
}
}
}
/**
* Sets the distance metric and {@link #p1} and {@link #p2}. Must be called
* after {@link #setEps(double) } and {@link #w} are set.
* @param dm the distance metric to use
*/
private void setDistanceMetric(DistanceMetric dm)
{
if(dm instanceof EuclideanDistance || dm instanceof ManhattanDistance)
{
this.dm = dm;
if(dm instanceof EuclideanDistance)
{
p1 = getP2L2(w, 1);
p2 = getP2L2(w, c);
}
else
{
p1 = getP2L1(w, 1);
p2 = getP2L1(w, c);
}
}
else
throw new IllegalArgumentException("only Euclidean and Manhatan (L1 and L2 norm) distances are supported");
}
private void setRadius(double radius)
{
if(Double.isInfinite(radius) || Double.isNaN(radius) || radius <= 0)
throw new IllegalArgumentException("Radius must be a positive constant, not " + radius);
this.radius = radius;
}
}
JSAT | JSAT-master/JSAT/src/jsat/linear/vectorcollection/lsh/RandomProjectionLSH.java |
package jsat.linear.vectorcollection.lsh;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import jsat.linear.*;
import jsat.linear.distancemetrics.CosineDistance;
import jsat.linear.distancemetrics.CosineDistanceNormalized;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.vectorcollection.VectorCollection;
import jsat.utils.BoundedSortedList;
import jsat.utils.IndexTable;
import jsat.utils.ProbailityMatch;
import jsat.utils.random.RandomUtil;
/**
* An implementation of Locality Sensitive Hashing for the
* {@link CosineDistance} using random projections. This forms a vector
* collection that performs a linear search of the data set, but does so in a
 * more efficient manner by comparing Hamming distances in a packed int array.
* However, the reported distances are only approximations - and may not be
* correct. For this reason the results are also approximate.
* <br><br>
* See:<br>
* <ul>
* <li>Charikar, M. S. (2002). <i>Similarity estimation techniques from rounding
* algorithms</i>. Proceedings of the thiry-fourth annual ACM symposium on
* Theory of computing - STOC ’02 (pp. 380–388). New York, New York, USA:
* ACM Press. doi:10.1145/509907.509965</li>
 * <li>Durme, B. Van, & Lall, A. (2010). <i>Online Generation of Locality
* Sensitive Hash Signatures</i>. Proceedings of the ACL 2010 Conference Short
* Papers (pp. 231–235). Stroudsburg, PA, USA.</li>
* </ul>
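 * <br><br>
 * A minimal usage sketch (the 128 bit signature size is an illustrative
 * choice):
 * <pre>{@code
 * RandomProjectionLSH<Vec> lsh = new RandomProjectionLSH<>(vecs, 4, true); //4*32 = 128 bits
 * IntList neighbors = new IntList();
 * DoubleList distances = new DoubleList();
 * lsh.search(query, 5, neighbors, distances); //approximate 5-NN
 * }</pre>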
*
* @author Edward Raff
*/
public class RandomProjectionLSH<V extends Vec> implements VectorCollection<V>
{
private static final long serialVersionUID = -2042964665052386855L;
private static final int NO_POOL = -1;
private Matrix randProjMatrix;
private int[] projections;
private int slotsPerEntry;
private List<V> vecs;
/*
     * Implementation note: we store an integer for the bits, but if we used a
     * small 64 bit encoding we would waste another 32-64 bits on object
     * overhead, so store it all in one big array instead of individual ones.
     * Hamming distance is the number of bit differences, and since XOR
     * results in a 1 bit only if the bits are not the same, we can use the
     * bit count of the XORed value to count the hamming distance.
*/
private ThreadLocal<Vec> tempVecs;
/**
* Creates a new Random Projection LSH object that uses a full matrix of
* normally distributed values.
*
* @param vecs the list of vectors to form a collection for
* @param ints the number of integers to use for the encoding
* @param inMemory {@code true} to construct the full matrix in memory, or
* {@code false} to construct the needed values on demand. This reduces
* memory use at increased CPU usage.
*/
public RandomProjectionLSH(List<V> vecs, int ints, boolean inMemory)
{
randProjMatrix = new NormalMatrix(ints*Integer.SIZE, vecs.get(0).length(), NO_POOL);
if(inMemory)
{
Matrix dense = new DenseMatrix(randProjMatrix.rows(), randProjMatrix.cols());
dense.mutableAdd(randProjMatrix);
randProjMatrix = dense;
}
build(true, vecs, new CosineDistance());
}
/**
* Creates a new Random Projection LSH object that uses a pool of normally
* distributed values to approximate a full matrix with considerably less
* memory storage.
*
* @param vecs the list of vectors to form a collection for
* @param ints the number of integers to use for the encoding
* @param poolSize the number of normally distributed random variables to
* store. Matrix values will be pulled on demand from an index in the pool
* of values.
*/
public RandomProjectionLSH(List<V> vecs, int ints, int poolSize)
{
randProjMatrix = new NormalMatrix(ints*Integer.SIZE, vecs.get(0).length(), poolSize);
build(true, vecs, new CosineDistance());
}
/**
* Copy Constructor
* @param toCopy the object to copy
*/
protected RandomProjectionLSH(RandomProjectionLSH<V> toCopy)
{
this.randProjMatrix = toCopy.randProjMatrix.clone();
this.projections = Arrays.copyOf(toCopy.projections, toCopy.projections.length);
this.slotsPerEntry = toCopy.slotsPerEntry;
this.vecs = new ArrayList<>(toCopy.vecs);
this.tempVecs = new ThreadLocal<Vec>()
{
@Override
protected Vec initialValue()
{
return new DenseVector(randProjMatrix.rows());
}
};
}
@Override
public List<Double> getAccelerationCache()
{
return null;
}
@Override
public void build(boolean parallel, List<V> collection, DistanceMetric dm)
{
setDistanceMetric(dm);
this.vecs = new ArrayList<>(collection);
tempVecs = ThreadLocal.withInitial(()->new DenseVector(randProjMatrix.rows()));
slotsPerEntry = randProjMatrix.rows()/Integer.SIZE;
projections = new int[slotsPerEntry*vecs.size()];
Vec projected = tempVecs.get();
for(int slot = 0; slot < vecs.size(); slot++)
{
projected.zeroOut();
projectVector(vecs.get(slot), slot*slotsPerEntry, projections, projected);
}
}
@Override
public void search(Vec query, double range, List<Integer> neighbors, List<Double> distances)
{
int minHammingDist = (int) cosineToHamming(CosineDistance.distanceToCosine(range));
final int[] queryProj = new int[slotsPerEntry];
        Vec tmpSpace = tempVecs.get();
        tmpSpace.zeroOut();
        projectVector(query, 0, queryProj, tmpSpace);
for(int slot = 0; slot < vecs.size(); slot++)
{
int hamming = 0;
int pos = 0;
while(pos < slotsPerEntry)
hamming += Integer.bitCount(projections[slot*slotsPerEntry+pos]^queryProj[pos++]);
if(hamming <= minHammingDist)
{
neighbors.add(slot);
distances.add(CosineDistance.cosineToDistance(hammingToCosine(hamming)));
}
}
IndexTable it = new IndexTable(distances);
it.apply(neighbors);
it.apply(distances);
}
@Override
public void search(Vec query, int numNeighbors, List<Integer> neighbors, List<Double> distances)
{
BoundedSortedList<ProbailityMatch<Integer>> toRet = new BoundedSortedList<>(numNeighbors);
final int[] queryProj = new int[slotsPerEntry];
        Vec tmpSpace = tempVecs.get();
        tmpSpace.zeroOut();
        projectVector(query, 0, queryProj, tmpSpace);
for(int slot = 0; slot < vecs.size(); slot++)
{
int hamming = 0;
int pos = 0;
while(pos < slotsPerEntry)
hamming += Integer.bitCount(projections[slot*slotsPerEntry+pos]^queryProj[pos++]);
if(toRet.size() < numNeighbors || hamming < toRet.last().getProbability())
toRet.add(new ProbailityMatch<>(hamming, slot));
}
        //now convert the hamming values to distance values
for(int i = 0; i < toRet.size(); i++)
{
neighbors.add(toRet.get(i).getMatch());
distances.add(CosineDistance.cosineToDistance(hammingToCosine(toRet.get(i).getProbability())));
}
}
/**
* Returns the signature or encoding length in bits.
* @return the signature length in bits
*/
public int getSignatureBitLength()
{
        return randProjMatrix.rows();//one bit per projection direction (row)
}
/**
* Projects a given vector into the array of integers.
*
     * @param vec the vector to project
     * @param slot the index into the array to start placing the bit values
     * @param projLocation the int array to write the packed signature bits into
     * @param projected a vector full of zeros of the same length as
     * {@link #getSignatureBitLength() } to use as a temp space.
*/
private void projectVector(Vec vec, int slot, int[] projLocation, Vec projected)
{
randProjMatrix.multiply(vec, 1.0, projected);
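        //Pack the sign of each projected coordinate into the int array, one bit
        //per coordinate (1 if the dot product with the random direction is >= 0)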
int pos = 0;
int bitsLeft = Integer.SIZE;
int curVal = 0;
while(pos < slotsPerEntry)
{
while(bitsLeft > 0)
{
curVal <<= 1;
if(projected.get(pos*Integer.SIZE+(Integer.SIZE-bitsLeft)) >= 0)
curVal |= 1;
bitsLeft--;
}
projLocation[slot+pos] = curVal;
curVal = 0;
bitsLeft = Integer.SIZE;
pos++;
}
}
@Override
public int size()
{
return vecs.size();
}
@Override
public V get(int indx)
{
return vecs.get(indx);
}
@Override
public VectorCollection<V> clone()
{
return new RandomProjectionLSH<>(this);
}
/**
* Matrix of random normal N(0, 1) values
*/
private static final class NormalMatrix extends RandomMatrix
{
private static final long serialVersionUID = -5274754647385324984L;
private final double[] pool;
private final long seedMult;
public NormalMatrix(int rows, int cols, int poolSize)
{
super(rows, cols);
if(poolSize > 0)
{
pool = new double[poolSize];
Random rand = RandomUtil.getRandom();
for(int i = 0; i < pool.length; i++)
pool[i] = rand.nextGaussian();
}
else
pool = null;
seedMult = RandomUtil.getRandom().nextLong();
}
public NormalMatrix(NormalMatrix toCopy)
{
super(toCopy);
if(toCopy.pool == null)
this.pool = null;
else
this.pool = Arrays.copyOf(toCopy.pool, toCopy.pool.length);
seedMult = toCopy.seedMult;
}
@Override
public double get(int i, int j)
{
if(pool == null)
return super.get(i, j);
else
{
long index = ((i+1)*(j+cols())*seedMult) & Integer.MAX_VALUE;
return pool[(int)index % pool.length];
}
}
@Override
protected double getVal(Random rand)
{
if(pool == null)
return rand.nextGaussian();
else
return pool[rand.nextInt(pool.length)];
}
@Override
public Matrix clone()
{
return new NormalMatrix(this);
}
}
private double hammingToCosine(double ham)
{
return Math.cos(ham*Math.PI/randProjMatrix.rows());
}
private double cosineToHamming(double cos)
{
return randProjMatrix.rows()*Math.acos(cos)/Math.PI;
}
@Override
public void setDistanceMetric(DistanceMetric dm)
{
if(!(dm instanceof CosineDistance || dm instanceof CosineDistanceNormalized))
throw new IllegalArgumentException("RandomProjectionLSH is only compatible with the Cosine Distance metric");
}
@Override
public DistanceMetric getDistanceMetric()
{
return new CosineDistance();
}
}
JSAT | JSAT-master/JSAT/src/jsat/lossfunctions/AbsoluteLoss.java |
package jsat.lossfunctions;
/**
* The AbsoluteLoss loss function for regression <i>L(x, y) = |x-y|</i>.
* <br>
 * This function is only once differentiable.
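 * <br><br>
 * A small worked example:
 * <pre>{@code
 * AbsoluteLoss.loss(2.5, 3.0);  //|2.5 - 3.0| = 0.5
 * AbsoluteLoss.deriv(2.5, 3.0); //signum(2.5 - 3.0) = -1.0
 * }</pre>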
*
* @author Edward Raff
*/
public class AbsoluteLoss implements LossR
{
private static final long serialVersionUID = -3398199227407867808L;
/**
* Computes the absolute loss
*
* @param pred the predicted value
* @param y the target value
* @return the loss for the functions
*/
public static double loss(double pred, double y)
{
return Math.abs(y - pred);
}
/**
* Returns the derivative of the absolute loss
*
* @param pred the predicted value
* @param y the target value
* @return the derivative of the loss function
*/
public static double deriv(double pred, double y)
{
return Math.signum(pred - y);
}
public static double regress(double score)
{
return score;
}
@Override
public double getLoss(double pred, double y)
{
return loss(pred, y);
}
@Override
public double getDeriv(double pred, double y)
{
return deriv(pred, y);
}
@Override
public double getDeriv2(double pred, double y)
{
return 0;
}
@Override
public double getConjugate(double b, double pred, double y)
{
return b*y;
}
@Override
public double getDeriv2Max()
{
return 0;
}
@Override
public AbsoluteLoss clone()
{
return this;
}
@Override
public double getRegression(double score)
{
return score;
}
@Override
public double lipschitz()
{
//see Stochastic Dual Coordinate Ascent Methods for Regularized Loss Minimization
//"absolute deviation loss are 1-Lipschitz."
return 1.0;
}
}
| 1,864 | 18.840426 | 89 | java |
JSAT | JSAT-master/JSAT/src/jsat/lossfunctions/EpsilonInsensitiveLoss.java | package jsat.lossfunctions;
/**
* The ε-insensitive loss for regression <i>L(x, y) =
* max(0, |x-y|-ε)</i> is the common loss function used for Support
* Vector Regression. <br>
* When ε = 0, the loss becomes equivalent to the {@link AbsoluteLoss}.
*
* @author Edward Raff
*/
public class EpsilonInsensitiveLoss implements LossR
{
private static final long serialVersionUID = -8735274561429676350L;
/**
* Computes the ε-insensitive loss
*
* @param pred the predicted value
* @param y the true value
* @param eps the epsilon tolerance
* @return the ε-insensitive loss
*/
public static double loss(double pred, double y, double eps)
{
final double x = Math.abs(pred - y);
return Math.max(0, x-eps);
}
/**
* Computes the first derivative of the ε-insensitive loss
*
* @param pred the predicted value
* @param y the true value
* @param eps the epsilon tolerance
* @return the first derivative of the ε-insensitive loss
*/
public static double deriv(double pred, double y, double eps)
{
final double x = pred - y;
if(eps < Math.abs(x))
return Math.signum(x);
else
return 0;
}
private double eps;
/**
* Creates a new Epsilon Insensitive loss
* @param eps the epsilon tolerance on error
*/
public EpsilonInsensitiveLoss(double eps)
{
if(eps < 0 || Double.isNaN(eps) || Double.isInfinite(eps))
throw new IllegalArgumentException("Epsilon must be non-negative, not " + eps);
this.eps = eps;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public EpsilonInsensitiveLoss(EpsilonInsensitiveLoss toCopy)
{
this.eps = toCopy.eps;
}
@Override
public double getLoss(double pred, double y)
{
return loss(pred, y, eps);
}
@Override
public double getDeriv(double pred, double y)
{
return deriv(pred, y, eps);
}
@Override
public double getDeriv2(double pred, double y)
{
return 0;
}
@Override
public double getConjugate(double b, double pred, double y)
{
//from "Regression tasks in machine learning via Fenchel duality"
if(Math.abs(b) < eps)
return b*y+eps*Math.abs(b);
return Double.POSITIVE_INFINITY;
}
@Override
public double getDeriv2Max()
{
return 0;
}
@Override
public EpsilonInsensitiveLoss clone()
{
return new EpsilonInsensitiveLoss(this);
}
@Override
public double getRegression(double score)
{
return score;
}
@Override
public double lipschitz()
{
//see "Are Loss Functions All the Same?"
return 1;
}
}
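/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only. It shows the "dead zone" of zero loss that the epsilon
 * tolerance creates around the target, and that eps = 0 recovers the absolute
 * loss.
 */
class EpsilonInsensitiveLossExample
{
    public static void main(String[] args)
    {
        EpsilonInsensitiveLoss svrLoss = new EpsilonInsensitiveLoss(0.5);
        //errors smaller than eps incur no loss and no gradient
        System.out.println(svrLoss.getLoss(1.3, 1.0)); //0.0
        System.out.println(svrLoss.getDeriv(1.3, 1.0)); //0.0
        //errors larger than eps grow linearly, like the absolute loss
        System.out.println(svrLoss.getLoss(3.0, 1.0)); //1.5
        //with eps = 0 the loss matches AbsoluteLoss
        System.out.println(new EpsilonInsensitiveLoss(0).getLoss(3.0, 1.0)
                == AbsoluteLoss.loss(3.0, 1.0)); //true
    }
}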
| 2,895 | 22.737705 | 91 | java |
JSAT | JSAT-master/JSAT/src/jsat/lossfunctions/HingeLoss.java | package jsat.lossfunctions;
import jsat.classifiers.CategoricalResults;
import jsat.linear.Vec;
/**
* The HingeLoss loss function for classification <i>L(x, y) = max(0, 1-y*x)</i>
* . This also includes the multi-class version of the hinge loss.
* <br>
* This function is only once differentiable.
*
* @author Edward Raff
*/
public class HingeLoss implements LossMC
{
private static final long serialVersionUID = -7001702646530236153L;
/**
* Computes the HingeLoss loss
*
* @param pred the predicted value
* @param y the target value
* @return the HingeLoss loss
*/
public static double loss(double pred, double y)
{
return Math.max(0, 1 - y * pred);
}
/**
* Computes the first derivative of the HingeLoss loss
*
* @param pred the predicted value
* @param y the target value
* @return the first derivative of the HingeLoss loss
*/
public static double deriv(double pred, double y)
{
if (pred * y > 1)
return 0;
else
return -y;
}
public static CategoricalResults classify(double score)
{
CategoricalResults cr = new CategoricalResults(2);
if(score > 0)
cr.setProb(1, 1.0);
else
cr.setProb(0, 1.0);
return cr;
}
@Override
public double getLoss(double pred, double y)
{
return loss(pred, y);
}
@Override
public double getDeriv(double pred, double y)
{
return deriv(pred, y);
}
@Override
public double getDeriv2(double pred, double y)
{
return 0;
}
@Override
public double getDeriv2Max()
{
return 0;
}
@Override
public HingeLoss clone()
{
return this;
}
@Override
public CategoricalResults getClassification(double score)
{
return classify(score);
}
@Override
public double getLoss(Vec processed, int y)
{
double max_not_y = Double.NEGATIVE_INFINITY;
for(int i = 0; i < processed.length(); i++)
if(i != y)
max_not_y = Math.max(max_not_y, processed.get(i));
return Math.max(0, 1.0+max_not_y-processed.get(y));
}
@Override
public void process(Vec pred, Vec processed)
{
if(pred != processed)
pred.copyTo(processed);
}
@Override
public void deriv(Vec processed, Vec derivs, int y)
{
        final double processed_y = processed.get(y);
double maxVal_not_y = Double.NEGATIVE_INFINITY;
int maxIndx = -1;
for(int i = 0; i < processed.length(); i++)
if(i != y && processed.get(i) > maxVal_not_y)
{
maxIndx = i;
maxVal_not_y = processed.get(i);
}
derivs.zeroOut();
        if(1.0 + maxVal_not_y - processed_y > 0)
{
derivs.set(y, -1.0);
derivs.set(maxIndx, 1.0);
}
}
@Override
public CategoricalResults getClassification(Vec processed)
{
int maxIndx = 0;
double maxVal_not_y = processed.get(maxIndx);
for(int i = 1; i < processed.length(); i++)
if(processed.get(i) > maxVal_not_y)
{
maxIndx = i;
maxVal_not_y = processed.get(i);
}
CategoricalResults toRet = new CategoricalResults(processed.length());
toRet.setProb(maxIndx, 1.0);
return toRet;
}
@Override
public double getConjugate(double b, double pred, double y)
{
if(b < -1 || b > 0)
return Double.POSITIVE_INFINITY;
//else
return b;
}
@Override
public double lipschitz()
{
//"Both hinge loss and absolute deviation loss are 1-Lipschitz"
        //Shalev-Shwartz, S., & Zhang, T. (2012). Stochastic Dual Coordinate Ascent Methods for Regularized Loss Minimization. Journal of Machine Learning Research, 14, 567–599. Machine Learning; Learning; Optimization and Control. Retrieved from http://arxiv.org/abs/1209.1873
return 1;
}
}
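/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only. The binary hinge loss penalizes any score on the wrong
 * side of the margin (y*pred < 1), and the multi-class form only reacts to
 * the highest scoring wrong class.
 */
class HingeLossExample
{
    public static void main(String[] args)
    {
        //binary case: y in {-1, 1}, scores past the margin get zero loss
        System.out.println(HingeLoss.loss(2.0, 1));  //0.0, correct and outside the margin
        System.out.println(HingeLoss.loss(0.5, 1));  //0.5, correct but inside the margin
        System.out.println(HingeLoss.loss(-1.0, 1)); //2.0, wrong side
        //multi-class case: raw scores for 3 classes, true label is class 0
        HingeLoss hinge = new HingeLoss();
        Vec scores = jsat.linear.DenseVector.toDenseVec(1.0, 0.5, -0.2);
        System.out.println(hinge.getLoss(scores, 0)); //max(0, 1 + 0.5 - 1.0) = 0.5
    }
}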
| 4,148 | 24.29878 | 276 | java |
JSAT | JSAT-master/JSAT/src/jsat/lossfunctions/HuberLoss.java | package jsat.lossfunctions;
/**
* The HuberLoss loss function for regression. The HuberLoss loss switches between
* {@link SquaredLoss} and {@link AbsoluteLoss} loss based on a threshold value.
* <br>
* This function is only partially twice differentiable.
*
* @author Edward Raff
*/
public class HuberLoss implements LossR
{
private static final long serialVersionUID = -4463269746356262940L;
private double c;
/**
* Creates a new HuberLoss loss
*
     * @param c the threshold to switch between the squared and absolute loss at
*/
public HuberLoss(double c)
{
this.c = c;
}
/**
* Creates a new HuberLoss loss thresholded at 1
*/
public HuberLoss()
{
this(1);
}
/**
* Computes the HuberLoss loss
*
* @param pred the predicted value
* @param y the true value
* @param c the threshold value
* @return the HuberLoss loss
*/
public static double loss(double pred, double y, double c)
{
final double x = y - pred;
if (Math.abs(x) <= c)
return x * x * 0.5;
else
return c * (Math.abs(x) - c / 2);
}
/**
* Computes the first derivative of the HuberLoss loss
*
* @param pred the predicted value
* @param y the true value
* @param c the threshold value
* @return the first derivative of the HuberLoss loss
*/
public static double deriv(double pred, double y, double c)
{
double x = pred-y;
if (Math.abs(x) <= c)
return x;
else
return c * Math.signum(x);
}
/**
* Computes the second derivative of the HuberLoss loss, which only exists for
     * values &lt; {@code c}
*
* @param pred the predicted value
* @param y the target value
* @param c the threshold value
* @return the second derivative of the HuberLoss loss
*/
public static double deriv2(double pred, double y, double c)
{
if (Math.abs(pred-y) < c)
return 1;
else
return 0;
}
public static double regress(double score)
{
return score;
}
@Override
public double getLoss(double pred, double y)
{
return loss(pred, y, c);
}
@Override
public double getDeriv(double pred, double y)
{
return deriv(pred, y, c);
}
@Override
public double getDeriv2(double pred, double y)
{
return deriv2(pred, y, c);
}
@Override
public double getConjugate(double b, double pred, double y)
{
//from "Regression tasks in machine learning via Fenchel duality"
if(-c < b && b < c)
return b*b*0.5+b*y;
return Double.POSITIVE_INFINITY;
}
@Override
public double getDeriv2Max()
{
return 1;
}
@Override
public HuberLoss clone()
{
return new HuberLoss(c);
}
@Override
public double getRegression(double score)
{
return score;
}
@Override
public double lipschitz()
{
//TODO: is this tighter?
//We also observe that |fc''(x)| ≤ 1/c. This implies that fc(x) is 1/c-Lipschitz continuous.
        //Huber interpolates between Absolute loss and Squared loss, which are both 1-Lipschitz, so Huber is too
return 1;
}
}
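/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only. It shows the switch from quadratic to linear behavior at
 * the threshold c, and the clipped derivative that keeps outliers from
 * dominating the gradient.
 */
class HuberLossExample
{
    public static void main(String[] args)
    {
        HuberLoss huber = new HuberLoss(1.0);
        //small residual: quadratic region, 0.5 * 0.5^2 = 0.125
        System.out.println(huber.getLoss(0.5, 0.0));
        //large residual: linear region, c * (|x| - c/2) = 1 * (3 - 0.5) = 2.5
        System.out.println(huber.getLoss(3.0, 0.0));
        //the derivative is clipped to +/- c
        System.out.println(huber.getDeriv(3.0, 0.0));  //1.0
        System.out.println(huber.getDeriv(-3.0, 0.0)); //-1.0
    }
}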
| 3,381 | 22.164384 | 111 | java |
JSAT | JSAT-master/JSAT/src/jsat/lossfunctions/LogisticLoss.java | package jsat.lossfunctions;
import static java.lang.Math.*;
import jsat.classifiers.CategoricalResults;
/**
* The LogisticLoss loss function for classification <i>L(x, y) =
* log(1+exp(-y*x))</i>.
* <br>
* This function is twice differentiable.
*
* @author Edward Raff
*/
public class LogisticLoss implements LossC
{
/*
     * NOTE: 30 is used as a threshold because exp(-30) straddles
* the edge of numerical double precision
*/
private static final long serialVersionUID = -3929171604513497068L;
/**
* Computes the logistic loss
*
* @param pred the predicted value
* @param y the true value
* @return the logistic loss
*/
public static double loss(double pred, double y)
{
final double x = -y * pred;
if (x >= 30)//as x -> inf, L(x) -> x. At 30 exp(x) is O(10^13), getting unstable. L(x)-x at this value is O(10^-14), also avoids exp and log ops
return x;
else if (x <= -30)
return 0;
return log(1 + exp(x));
}
/**
* Computes the first derivative of the logistic loss
*
* @param pred the predicted value
* @param y the true value
* @return the first derivative of the logistic loss
*/
public static double deriv(double pred, double y)
{
final double x = y * pred;
if (x >= 30)
return 0;
else if (x <= -30)
return y;
return -y / (1 + exp(y * pred));
}
/**
* Computes the second derivative of the logistic loss
*
* @param pred the predicted value
* @param y the true value
* @return the second derivative of the logistic loss
*/
public static double deriv2(double pred, double y)
{
final double x = y * pred;
if (x >= 30)
return 0;
else if (x <= -30)
return 0;
final double p = 1 / (1 + exp(y * pred));
return p * (1 - p);
}
public static CategoricalResults classify(double score)
{
CategoricalResults cr = new CategoricalResults(2);
final double p;
if (score > 30)
p = 1.0;
else if (score < -30)
p = 0.0;
else
p = 1 / (1 + Math.exp(-score));
cr.setProb(0, 1 - p);
cr.setProb(1, p);
return cr;
}
@Override
public double getLoss(double pred, double y)
{
return loss(pred, y);
}
@Override
public double getDeriv(double pred, double y)
{
return deriv(pred, y);
}
@Override
public double getDeriv2(double pred, double y)
{
return deriv2(pred, y);
}
@Override
public double getDeriv2Max()
{
return 1.0 / 4.0;
}
@Override
public LogisticLoss clone()
{
return this;
}
@Override
public CategoricalResults getClassification(double score)
{
return classify(score);
}
@Override
public double getConjugate(double b, double pred, double y)
{
if(b < 0 || b > 1)
return Double.POSITIVE_INFINITY;
//else
        if(0.5-abs(0.5-b) < 1e-13) // b is so close to 0 or 1 that log(b) or log(1-b) would blow up, so round the result
            return 0;//the limit of b*log(b) + (1-b)*log(1-b) as b approaches 0 or 1 is 0
        return b * log(b) + (1-b) * log(1-b);
}
@Override
public double lipschitz()
{
return 4;
}
}
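/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only, and assumes CategoricalResults exposes a getProb(int)
 * accessor as it does elsewhere in JSAT. Unlike the hinge loss, the logistic
 * loss is smooth everywhere and its scores map to class probabilities.
 */
class LogisticLossExample
{
    public static void main(String[] args)
    {
        //a confident, correct score still has a small non-zero loss
        System.out.println(LogisticLoss.loss(3.0, 1)); //log(1 + exp(-3)) ~ 0.0486
        //a score of 0 is maximally uncertain: the loss is log(2)
        System.out.println(LogisticLoss.loss(0.0, 1)); //~0.6931
        //the same score converted to P(y = 1) = 1/(1 + exp(-score))
        System.out.println(LogisticLoss.classify(0.0).getProb(1)); //0.5
    }
}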
| 3,490 | 22.910959 | 152 | java |
JSAT | JSAT-master/JSAT/src/jsat/lossfunctions/LossC.java | package jsat.lossfunctions;
import jsat.classifiers.CategoricalResults;
/**
* Specifies a loss function for binary classification problems.
*
* @author Edward Raff
*/
public interface LossC extends LossFunc
{
/**
     * Computes the loss for a classification problem.
*
* @param pred the predicted score in (-Infinity, Infinity)
* @param y the true class label in {-1, 1}
     * @return the loss in [0, Inf)
*/
@Override
public double getLoss(double pred, double y);
/**
     * Computes the first derivative of the loss function.
*
* @param pred the predicted score in (-Infinity, Infinity)
* @param y the true class label in {-1, 1}
     * @return the first derivative of the loss
*/
@Override
public double getDeriv(double pred, double y);
/**
     * Computes the second derivative of the loss function.
*
* @param pred the predicted score in (-Infinity, Infinity)
* @param y the true class label in {-1, 1}
     * @return the second derivative of the loss function
*/
@Override
public double getDeriv2(double pred, double y);
/**
* Given the score value of a data point, this returns the classification
* results.
*
* @param score the score for a data point
* @return the categorical results with the correct probability values for
* this loss function.
*/
public CategoricalResults getClassification(double score);
@Override
public LossC clone();
}
| 1,546 | 26.625 | 79 | java |
JSAT | JSAT-master/JSAT/src/jsat/lossfunctions/LossFunc.java | package jsat.lossfunctions;
import java.io.Serializable;
/**
* Provides a generic interface for some loss function on some problem that can
* be described with a single real prediction value and a single real expected
* value.
* <br><br>
* A loss function must be non-negative and should be convex.
*
* @author Edward Raff
*/
public interface LossFunc extends Serializable
{
/**
* Computes the loss for some problem.
*
* @param pred the predicted value in (-Infinity, Infinity)
* @param y the true value in (-Infinity, Infinity)
* @return the loss in [0, Inf)
*/
public double getLoss(double pred, double y);
/**
* Computes the first derivative of the loss function.
*
* @param pred the predicted score in (-Infinity, Infinity)
* @param y the true value in (-Infinity, Infinity)
     * @return the first derivative of the loss
*/
public double getDeriv(double pred, double y);
/**
     * Computes the second derivative of the loss function.
*
* @param pred the predicted score in (-Infinity, Infinity)
* @param y the true value in (-Infinity, Infinity)
* @return the second derivative of the getLoss function
*/
public double getDeriv2(double pred, double y);
/**
* Computes the result of the conjugate function of this loss. This function
* is generally optional, and should return {@link Double#NaN} if not
* properly implemented. Many optimization algorithms do require a working
* implementation though.
* @param b the primary input to the function
* @param pred the predicted score in (-Infinity, Infinity)
* @param y the true class label in {-1, 1}
* @return the result of the conjugate function of this loss
*/
public double getConjugate(double b, double pred, double y);
/**
* Returns an upper bound on the maximum value of the second derivative. If
* the second derivative does not exist, {@link Double#NaN} is a valid
* result. It is also possible for {@code 0} and
* {@link Double#POSITIVE_INFINITY} to be valid results, and must be checked
* for.
*
* @return the max value of {@link #getDeriv2(double, double) }
*/
public double getDeriv2Max();
/**
* If this loss is L-Lipschitz (1/L Lipschitz smooth), this method will return the value of L. If it is not L-Lipschitz, a value of 0 will be returned.
* @return the L-Lipschitz constant, or 0 if this loss is not L-Lipschitz;
*/
public double lipschitz();
public LossFunc clone();
}
| 2,613 | 33.853333 | 157 | java |
JSAT | JSAT-master/JSAT/src/jsat/lossfunctions/LossMC.java | package jsat.lossfunctions;
import jsat.classifiers.CategoricalResults;
import jsat.linear.Vec;
/**
* Specifies a loss function for multi-class problems. A multi-class loss
* function must support a raw vector of scores for each class, where positive
* values indicate preference for the class associated with the same index. <br>
* <br>
* Calling {@link #process(jsat.linear.Vec, jsat.linear.Vec) } on the raw
* scores is a mandatory first step, and will transform the raw scores into a
* usable form for the loss function. <br>
* <br>
* @author Edward Raff
*/
public interface LossMC extends LossC
{
/**
     * Computes the scalar loss on the given example
     * @param processed the vector of processed predictions.
* @param y the true class label in [0, k-1] for <i>k</i> classes
* @return the loss in [0, Inf)
*/
public double getLoss(Vec processed, int y);
/**
* Given the vector of raw outputs for each class, transform it into a new
* vector.
* <br>
     * {@code pred} and {@code processed} may be the same object, and will
* simply have all its values altered if so.
* @param pred the vector of raw predictions
* @param processed the location to store the processed predictions.
*/
public void process(Vec pred, Vec processed);
/**
* Computes the derivatives with respect to each output
* <br>
* {@code processed} and {@code derivs} may be the same object, and will
* simply have all its values altered if so.
* @param processed the processed predictions
* @param derivs the vector to place the derivative of the loss to.
* @param y the true class label in [0, k-1] for <i>k</i> classes
*/
public void deriv(Vec processed, Vec derivs, int y);
/**
* Given the {@link #process(jsat.linear.Vec, jsat.linear.Vec) processed}
* predictions, returns the classification results for said predictions.
* @param processed the processed score/prediction vector
* @return the classification results
*/
public CategoricalResults getClassification(Vec processed);
}
| 2,150 | 36.736842 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/lossfunctions/LossR.java | package jsat.lossfunctions;
/**
 * Specifies a loss function for regression problems.
*
* @author Edward Raff
*/
public interface LossR extends LossFunc
{
/**
     * Computes the loss for a regression problem.
*
* @param pred the predicted value in (-Infinity, Infinity)
* @param y the true target value in (-Infinity, Infinity)
     * @return the loss in [0, Inf)
*/
@Override
public double getLoss(double pred, double y);
/**
     * Computes the first derivative of the loss function.
*
* @param pred the predicted value in (-Infinity, Infinity)
* @param y the true target value in (-Infinity, Infinity)
     * @return the first derivative of the loss
*/
@Override
public double getDeriv(double pred, double y);
/**
     * Computes the second derivative of the loss function.
*
* @param pred the predicted value in (-Infinity, Infinity)
* @param y the true target value in (-Infinity, Infinity)
     * @return the second derivative of the loss function
*/
@Override
public double getDeriv2(double pred, double y);
/**
* Given the score value of a data point, this returns the correct numeric
* result. For most regression problems this simply returns the score value.
*
* @param score the score for a data point
* @return the correct numeric regression value for this loss function
*/
public double getRegression(double score);
@Override
public LossR clone();
}
| 1,546 | 28.188679 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/lossfunctions/SoftmaxLoss.java | package jsat.lossfunctions;
import jsat.classifiers.CategoricalResults;
import jsat.linear.Vec;
import jsat.math.MathTricks;
/**
* The Softmax loss function is a multi-class generalization of the
* {@link LogisticLoss Logistic loss}.
*
* @author Edward Raff
*/
public class SoftmaxLoss extends LogisticLoss implements LossMC
{
private static final long serialVersionUID = 3936898932252996024L;
@Override
public double getLoss(Vec processed, int y)
{
return -Math.log(processed.get(y));
}
@Override
public void process(Vec pred, Vec processed)
{
if(pred != processed)
pred.copyTo(processed);
MathTricks.softmax(processed, false);
}
@Override
public void deriv(Vec processed, Vec derivs, int y)
{
for(int i = 0; i < processed.length(); i++)
if(i == y)
derivs.set(i, processed.get(i)-1);//-(1-p)
else
derivs.set(i, processed.get(i));//-(0-p)
}
@Override
public CategoricalResults getClassification(Vec processed)
{
return new CategoricalResults(processed.arrayCopy());
}
}
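/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only. Raw multi-class scores must first be run through
 * process(), which applies the softmax, before the loss and derivatives are
 * meaningful.
 */
class SoftmaxLossExample
{
    public static void main(String[] args)
    {
        SoftmaxLoss softmax = new SoftmaxLoss();
        Vec scores = jsat.linear.DenseVector.toDenseVec(2.0, 1.0, 0.0);
        Vec probs = scores.clone();
        softmax.process(scores, probs); //probs is now ~[0.665, 0.245, 0.090]
        System.out.println(softmax.getLoss(probs, 0)); //-log(0.665) ~ 0.408
        //the derivative vector is (p - onehot(y)), used for gradient based training
        Vec derivs = probs.clone();
        softmax.deriv(probs, derivs, 0);
        System.out.println(derivs.get(0)); //~-0.335, which is p_0 - 1
    }
}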
| 1,161 | 23.208333 | 70 | java |
JSAT | JSAT-master/JSAT/src/jsat/lossfunctions/SquaredLoss.java | package jsat.lossfunctions;
/**
* The SquaredLoss loss function for regression <i>L(x, y) = (x-y)<sup>2</sup></i>.
* <br>
* This function is twice differentiable.
*
* @author Edward Raff
*/
public class SquaredLoss implements LossR
{
private static final long serialVersionUID = 130786305325167077L;
/**
* Computes the SquaredLoss loss
*
* @param pred the predicted value
* @param y the true value
* @return the squared loss
*/
public static double loss(double pred, double y)
{
final double x = y - pred;
return x * x * 0.5;
}
/**
* Computes the first derivative of the squared loss
*
* @param pred the predicted value
* @param y the true value
* @return the first derivative of the squared loss
*/
public static double deriv(double pred, double y)
{
return (pred - y);
}
/**
* Computes the second derivative of the squared loss, which is always
* {@code 1}
*
* @param pred the predicted value
* @param y the true value
* @return the second derivative of the squared loss
*/
public static double deriv2(double pred, double y)
{
return 1;
}
public static double regress(double score)
{
return score;
}
@Override
public double getLoss(double pred, double y)
{
return loss(pred, y);
}
@Override
public double getDeriv(double pred, double y)
{
return deriv(pred, y);
}
@Override
public double getDeriv2(double pred, double y)
{
return deriv2(pred, y);
}
@Override
public double getConjugate(double b, double pred, double y)
{
return b*b*0.5+b*y;
}
@Override
public double getDeriv2Max()
{
return 1;
}
@Override
public SquaredLoss clone()
{
return this;
}
@Override
public double getRegression(double score)
{
return score;
}
@Override
public double lipschitz()
{
//see Stochastic Dual Coordinate Ascent Methods for Regularized Loss Minimization
//"Both log loss and squared loss are 1-smooth"
return 1.0;
}
}
| 2,241 | 19.759259 | 89 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/Complex.java | package jsat.math;
import java.io.Serializable;
/**
* A class for representing a complex value by a real and imaginary double pair.
*
* @author Edward Raff
*/
public class Complex implements Cloneable, Serializable
{
private static final long serialVersionUID = -2219274170047061708L;
private double real, imag;
/**
* Returns the complex number representing sqrt(-1)
* @return the complex number <i>i</i>
*/
public static Complex I()
{
return new Complex(0.0, 1.0);
}
/**
* Creates a new Complex number
* @param real the real part of the number
* @param imag the imaginary part of the number
*/
public Complex(double real, double imag)
{
this.real = real;
this.imag = imag;
}
/**
* Returns the real part of this complex number
* @return the real part of this complex number
*/
public double getReal()
{
return real;
}
/**
* Sets the real value part of this complex number
* @param r the new real value
*/
public void setReal(double r)
{
this.real = r;
}
/**
* Sets the imaginary value part of this complex number
* @param imag the new imaginary value
*/
public void setImag(double imag)
{
this.imag = imag;
}
/**
* Returns the imaginary part of this complex number
* @return the imaginary part of this complex number
*/
public double getImag()
{
return imag;
}
/**
* Alters this complex number as if an addition of another complex number was performed.
* @param r the real part of the other number
* @param i the imaginary part of the other number
*/
public void mutableAdd(double r, double i)
{
this.real += r;
this.imag += i;
}
/**
* Alters this complex number to contain the result of the addition of another
* @param c the complex value to add to this
*/
public void mutableAdd(Complex c)
{
mutableAdd(c.real, c.imag);
}
/**
* Creates a new complex number containing the resulting addition of this and another
* @param c the number to add
* @return <tt>this</tt>+c
*/
public Complex add(Complex c)
{
Complex ret = new Complex(real, imag);
ret.mutableAdd(c);
return ret;
}
/**
* Alters this complex number as if a subtraction of another complex number was performed.
* @param r the real part of the other number
* @param i the imaginary part of the other number
*/
public void mutableSubtract(double r, double i)
{
mutableAdd(-r, -i);
}
/**
* Alters this complex number to contain the result of the subtraction of another
* @param c the number to subtract
*/
public void mutableSubtract(Complex c)
{
mutableSubtract(c.real, c.imag);
}
/**
* Creates a new complex number containing the resulting subtracting another from this one
* @param c the number to subtract
* @return <tt>this</tt>-c
*/
public Complex subtract(Complex c)
{
Complex ret = new Complex(real, imag);
ret.mutableSubtract(c);
return ret;
}
/**
* Performs a complex multiplication
*
* @param a the real part of the first number
* @param b the imaginary part of the first number
* @param c the real part of the second number
* @param d the imaginary part of the second number
* @param results an array to store the real and imaginary results in. First index is the real, 2nd is the imaginary.
*/
public static void cMul(double a, double b, double c, double d, double[] results)
{
results[0] = a*c-b*d;
results[1] = b*c+a*d;
}
/**
* Alters this complex number as if a multiplication of another complex number was performed.
* @param c the real part of the other number
* @param d the imaginary part of the other number
*/
public void mutableMultiply(double c, double d)
{
double newR = this.real*c-this.imag*d;
double newI = this.imag*c+this.real*d;
this.real = newR;
this.imag = newI;
}
/**
* Alters this complex number to contain the result of the multiplication of another
* @param c the number to multiply by
*/
public void mutableMultiply(Complex c)
{
mutableMultiply(c.real, c.imag);
}
/**
* Creates a new complex number containing the resulting multiplication between this and another
* @param c the number to multiply by
* @return <tt>this</tt>*c
*/
public Complex multiply(Complex c)
{
Complex ret = new Complex(real, imag);
ret.mutableMultiply(c);
return ret;
}
/**
* Performs a complex division operation. <br>
* The standard complex division performs a set of operations that is
     * susceptible to both overflow and underflow. This method is more
* numerically stable while still being relatively fast to execute.
*
* @param a the real part of the first number
* @param b the imaginary part of the first number
* @param c the real part of the second number
* @param d the imaginary part of the second number
* @param results an array to store the real and imaginary results in. First
* index is the real, 2nd is the imaginary.
*/
public static void cDiv(double a, double b, double c, double d, double[] results)
{
/**
* Douglas M. Priest. Efficient scaling for complex division. ACM Trans.
* Math. Softw., 30(4):389–401, 2004
*/
long aa, bb, cc, dd, ss;
double t;
int ha, hb, hc, hd, hz, hw, hs;
/*extract high-order 32 bits to estimate |z| and |w| */
aa = Double.doubleToRawLongBits(a);
bb = Double.doubleToRawLongBits(b);
ha = (int) ((aa >> 32) & 0x7fffffff);
hb = (int) ((bb >> 32) & 0x7fffffff);
hz = (ha > hb)? ha : hb;
cc = Double.doubleToRawLongBits(c);
dd = Double.doubleToRawLongBits(d);
hc = (int) ((cc >> 32) & 0x7fffffff);
hd = (int) ((dd >> 32) & 0x7fffffff);
hw = (hc > hd)? hc : hd;
/* compute the scale factor */
if (hz < 0x07200000 && hw >= 0x32800000 && hw < 0x47100000)
{
/* |z| < 2^-909 and 2^-215 <= |w| < 2^114 */
hs = (((0x47100000 - hw) >> 1) & 0xfff00000) + 0x3ff00000;
}
else
hs = (((hw >> 2) - hw) + 0x6fd7ffff) & 0xfff00000;
ss = ((long) hs) << 32;
/* scale c and d, and compute the quotient */
double ssd = Double.longBitsToDouble(ss);
c *= ssd;
d *= ssd;
t = 1.0 / (c * c + d * d);
c *= ssd;
d *= ssd;
results[0] = (a * c + b * d) * t;
results[1] = (b * c - a * d) * t;
}
/**
* Alters this complex number as if a division by another complex number was performed.
* @param c the real part of the other number
* @param d the imaginary part of the other number
*/
public void mutableDivide(double c, double d)
{
final double[] r = new double[2];
cDiv(real, imag, c, d, r);
this.real = r[0];
this.imag = r[1];
}
/**
* Alters this complex number to contain the result of the division by another
* @param c the number to divide by
*/
public void mutableDivide(Complex c)
{
mutableDivide(c.real, c.imag);
}
/**
* Creates a new complex number containing the resulting division of this by
* another
*
* @param c the number to divide by
* @return <tt>this</tt>/c
*/
public Complex divide(Complex c)
{
Complex ret = new Complex(real, imag);
ret.mutableDivide(c);
return ret;
}
/**
* Computes the magnitude of this complex number, which is
* sqrt({@link #getReal() Re}<sup>2</sup>+{@link #getImag() Im}<sup>2</sup>)
* @return the magnitude of this complex number
*/
public double getMagnitude()
{
return Math.hypot(real, imag);
}
/**
* Computes the Argument, also called phase, of this complex number. Unless
* the result is {@link Double#NaN}, which occurs only for complex zero, the
* result will be in the range [-pi, pi]
* @return the argument of this complex number
*/
public double getArg()
{
return Math.atan2(imag, real);
}
/**
* Alters this complex number so that it represents its complex conjugate
* instead.
*/
public void mutateConjugate()
{
this.imag = -imag;
}
/**
* Returns a new complex number representing the complex conjugate of this
* one
* @return the complex conjugate of <tt>this</tt>
*/
public Complex getConjugate()
{
return new Complex(real, -imag);
}
@Override
public String toString()
{
if(imag == 0)
return Double.toString(real);
else if(real == 0)
return imag + "i";
else
return "("+real + " + " + imag + "i)";
}
@Override
protected Complex clone()
{
return new Complex(real, imag);
}
@Override
public boolean equals(Object obj)
{
return equals(obj, 0.0);
}
@Override
public int hashCode()
{
int hash = 5;
hash = 67 * hash + (int) (Double.doubleToLongBits(this.real) ^ (Double.doubleToLongBits(this.real) >>> 32));
hash = 67 * hash + (int) (Double.doubleToLongBits(this.imag) ^ (Double.doubleToLongBits(this.imag) >>> 32));
return hash;
}
/**
* Checks if <i>this</i> is approximately equal to another Complex object
* @param obj the object to compare against
* @param eps the maximum acceptable difference between values to be
* considered equal
* @return <tt>true</tt> if the objects are approximately equal
*/
public boolean equals(Object obj, double eps)
{
if( obj instanceof Complex)
{
Complex other = (Complex) obj;
if(Math.abs(this.real-other.real) > eps)
return false;
else if(Math.abs((this.imag - other.imag)) > eps)
return false;
return true;
}
return false;
}
}
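/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only. It exercises the basic arithmetic and shows why the
 * scaled division is used: a naive (a*c + b*d)/(c*c + d*d) formula would
 * overflow for components near 1e300.
 */
class ComplexExample
{
    public static void main(String[] args)
    {
        Complex a = new Complex(3, 4);
        System.out.println(a.getMagnitude()); //5.0
        //multiplying by i rotates the number 90 degrees: (3+4i)*i = -4+3i
        System.out.println(a.multiply(Complex.I()));
        //naive division of huge values would overflow; the scaled version does not
        Complex big = new Complex(1e300, 1e300);
        System.out.println(big.divide(big)); //1.0, even though c*c + d*d is infinite
    }
}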
| 10,650 | 27.402667 | 122 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/ContinuedFraction.java |
package jsat.math;
/**
* This class provides a means to represent and evaluate continued fractions in
* a multitude of ways.
*
* @author Edward Raff
*/
public abstract class ContinuedFraction
{
/**
* The a term of a continued fraction is the value that occurs as one of the
     * numerators, and its depth starts at 1.
*
* @param pos the depth of the continued fraction to evaluate at
* @param args the values for the variables of the continued fraction
* @return the value that would be computed for the a coefficient at the
* specified depth of the fraction
*/
abstract public double getA(int pos, double... args);
/**
* The b term of a continued fraction is the value that is added to the
* continuing fraction, its depth starts at 0.
*
* @param pos the depth of the continued fraction to evaluate at
* @param args the values for the variables of the continued fraction
* @return the value that would be computed for the b coefficient at the
* specified depth of the fraction
*/
abstract public double getB(int pos, double... args);
/**
* Approximates the continued fraction using a naive approximation
*
* @param n the number of iterations to perform
* @param args the values to input for the variables of the continued fraction
     * @return an approximation of the value of the continued fraction
*/
public double backwardNaive(int n, double... args)
{
double term = getA(n, args)/getB(n,args);
for(n = n-1; n >0; n--)
{
term = getA(n, args)/(getB(n,args)+term);
}
return term + getB(0, args);
}
/**
     * Uses Thompson and Barnett's modified Lentz's algorithm to create an
* approximation that should be accurate to full precision.
*
* @param args the numeric inputs to the continued fraction
* @return the approximate value of the continued fraction
*/
public double lentz(double... args)
{
double f_n = getB(0, args);
if(f_n == 0.0)
f_n = 1e-30;
double c_n, c_0 = f_n;
double d_n, d_0 = 0;
double delta = 0;
int j = 0;
while(Math.abs(delta - 1) > 1e-15)
{
j++;
d_n = getB(j, args) + getA(j, args)*d_0;
if(d_n == 0.0)
d_n = 1e-30;
c_n = getB(j, args) + getA(j, args)/c_0;
if(c_n == 0.0)
c_n = 1e-30;
d_n = 1/d_n;
delta = c_n*d_n;
f_n *= delta;
d_0 = d_n;
c_0 = c_n;
}
return f_n;
}
}
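/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only. The golden ratio is the continued fraction
 * 1 + 1/(1 + 1/(1 + ...)), i.e. every a and b term is 1, so both evaluation
 * methods should converge to (1 + sqrt(5))/2 ~ 1.6180339887.
 */
class ContinuedFractionExample
{
    public static void main(String[] args)
    {
        ContinuedFraction goldenRatio = new ContinuedFraction()
        {
            @Override
            public double getA(int pos, double... args)
            {
                return 1.0;
            }
            @Override
            public double getB(int pos, double... args)
            {
                return 1.0;
            }
        };
        System.out.println(goldenRatio.lentz());           //~1.618033988749895
        System.out.println(goldenRatio.backwardNaive(40)); //~1.618033988749895
    }
}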
| 2,820 | 28.082474 | 82 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/DescriptiveStatistics.java |
package jsat.math;
import jsat.linear.Vec;
/**
*
* @author Edward Raff
*/
public class DescriptiveStatistics
{
/**
* Computes the sample correlation coefficient for two data sets X and Y. The lengths of X and Y must be the same, and each element in X should correspond to the element in Y.
*
     * @param xData the X data set
     * @param yData the Y data set
* @return the sample correlation coefficient
*/
public static double sampleCorCoeff(Vec xData, Vec yData)
{
if(yData.length() != xData.length())
throw new ArithmeticException("X and Y data sets must have the same length");
double xMean = xData.mean();
double yMean = yData.mean();
double topSum = 0;
for(int i = 0; i < xData.length(); i++)
{
topSum += (xData.get(i)-xMean)*(yData.get(i)-yMean);
}
return topSum/((xData.length()-1)*xData.standardDeviation()*yData.standardDeviation());
}
/**
* Computes several summary statistics from the two data sets. These are: <br>
*
* Index 0: S<sub>x</sub> <br>
* Index 1: S<sub>y</sub> <br>
* Index 2: S<sub>xx</sub> <br>
* Index 3: S<sub>yy</sub> <br>
* Index 4: S<sub>xy</sub>
*
* @param xData the x values of the data set
* @param yData the y values of the data set
* @return several summary statistics of the 2 variables
*/
public static double[] summaryStats(Vec xData, Vec yData)
{
double[] values = new double[5];
//Sx, sum of x values
values[0] = xData.sum();
//Sy, sum of y values
values[1] = yData.sum();
double tmp = 0;
//Sxx
for(int i = 0; i < xData.length(); i++)
tmp += Math.pow(xData.get(i), 2);
values[2] = tmp;
//Syy
tmp = 0;
for(int i = 0; i < xData.length(); i++)
tmp += Math.pow(yData.get(i), 2);
values[3] = tmp;
//Sxy
tmp = 0;
for(int i = 0; i < xData.length(); i++)
tmp += xData.get(i)*yData.get(i);
values[4] = tmp;
return values;
}
}
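/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only. For data where y is an exact linear function of x the
 * sample correlation coefficient should come out as 1.
 */
class DescriptiveStatisticsExample
{
    public static void main(String[] args)
    {
        Vec x = jsat.linear.DenseVector.toDenseVec(1, 2, 3, 4, 5);
        Vec y = jsat.linear.DenseVector.toDenseVec(2, 4, 6, 8, 10); //y = 2x
        System.out.println(DescriptiveStatistics.sampleCorCoeff(x, y)); //~1.0
        double[] stats = DescriptiveStatistics.summaryStats(x, y);
        System.out.println(stats[0]); //Sx  = 1+2+3+4+5 = 15.0
        System.out.println(stats[4]); //Sxy = 2+8+18+32+50 = 110.0
    }
}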
| 2,237 | 25.329412 | 179 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/ExponentialMovingStatistics.java | /*
* Copyright (C) 2016 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.math;
import java.io.Serializable;
/**
* This class keeps track of a set of Exponential Moving statistics (the mean
* and standard deviation). When considering just the mean, this is often
* referred to as Exponential Moving Average (EMA). Similar to
* {@link OnLineStatistics}, this method will use fixed memory to keep an
* estimate of the mean and standard deviation of a stream of values. However
* this class will adjust to the mean and standard deviation of only recent
* additions, and will "forget" the contribution of earlier values. The rate of
* forgetting is controlled with the {@link #smoothing smoothing} parameter.
*
*
* @author Edward Raff <[email protected]>
*/
public class ExponentialMovingStatistics implements Serializable, Cloneable
{
private double mean;
private double variance;
private double smoothing;
/**
* Creates a new object for keeping an exponential estimate of the mean and
* variance. Uses a relatively low smoothing factor of 0.1
*
*/
public ExponentialMovingStatistics()
{
this(0.1);
}
/**
* Creates a new object for keeping an exponential estimate of the mean and
* variance
*
* @param smoothing the {@link #smoothing smoothing} parameter to use
*/
public ExponentialMovingStatistics(double smoothing)
{
this(smoothing, Double.NaN, 0);
}
/**
* Creates a new object for keeping an exponential estimate of the mean and
* variance
*
* @param smoothing the {@link #smoothing smoothing} parameter to use
* @param mean an initial mean. May be {@link Double#NaN NaN} to indicate no
* initial mean.
* @param variance an initial variance. May be {@link Double#NaN NaN} to
     * indicate no initial variance.
*/
public ExponentialMovingStatistics(double smoothing, double mean, double variance)
{
this.mean = mean;
this.variance = variance;
setSmoothing(smoothing);
}
/**
* Sets the smoothing parameter value to use. Must be in the range (0, 1].
* Changing this value will impact how quickly the statistics adapt to
* changes, with larger values increasing rate of change and smaller values
* decreasing it.
*
* @param smoothing the smoothing value to use
*/
public void setSmoothing(double smoothing)
{
if (smoothing <= 0 || smoothing > 1 || Double.isNaN(smoothing))
throw new IllegalArgumentException("Smoothing must be in (0, 1], not " + smoothing);
this.smoothing = smoothing;
}
/**
*
* @return the smoothing parameter in use
*/
public double getSmoothing()
{
return smoothing;
}
/**
* Adds the given data point to the statistics
*
* @param x the new value to add to the moving statistics
*/
public void add(double x)
{
        if (Double.isNaN(mean))//first case
{
mean = x;
variance = 0;
}
else//general case
{
            //first update the variance, which must use the mean from before this x is added
variance = (1-smoothing)*(variance + smoothing*Math.pow(x-mean, 2));
mean = (1-smoothing)*mean + smoothing*x;
}
}
/**
*
* @return estimate of the moving mean
*/
public double getMean()
{
return mean;
}
/**
*
* @return the estimate of moving variance
*/
public double getVariance()
{
return variance;
}
/**
*
* @return the estimate of moving standard deviation
*/
public double getStandardDeviation()
{
return Math.sqrt(getVariance()+1e-13);
}
}
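/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only. After the stream's level shifts, the exponential moving
 * mean tracks the new regime, while an equally weighted mean would be stuck
 * at the global average of 5.
 */
class ExponentialMovingStatisticsExample
{
    public static void main(String[] args)
    {
        ExponentialMovingStatistics ema = new ExponentialMovingStatistics(0.1);
        for (int i = 0; i < 200; i++)
            ema.add(0.0);  //the stream starts out centered at 0
        for (int i = 0; i < 200; i++)
            ema.add(10.0); //then jumps to 10
        System.out.println(ema.getMean());              //~10, the old regime is forgotten
        System.out.println(ema.getStandardDeviation()); //~0, the stream is constant again
    }
}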
| 4,457 | 28.523179 | 96 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/FastMath.java | package jsat.math;
import static java.lang.Double.*;
import static java.lang.Math.PI;
import static java.lang.Math.tan;
/**
* This class contains fast implementations of many of the methods located in
* {@link Math} and {@link SpecialMath}. This speed comes at the cost of
* correctness, and in general the methods in this class attempt to have a
* relative error no worse than 10<sup>-3</sup> for most inputs. <br>
* <br>
 * Implementation details - and therefore accuracy / speed - may change over
* time. 10<sup>-3</sup> is not a hard guarantee, but a goal. The accuracy near
* asymptotes and extreme values will never be guaranteed. The handling of
* special values will never be guaranteed.
*
* @author Edward Raff
*/
public class FastMath
{
/*
     * Exponent bias for doubles is 1023
* so exponentValue-1023 = unbiased value
*/
private static long getMantissa(long bits)
{
return bits & 0x000fffffffffffffL;
}
@SuppressWarnings("unused")
private static long getExponent(long bits)
{
return (bits & 0x7ff0000000000000L) >> 52;
}
private static final double logConst = Math.log(2);
/**
* Computes the natural logarithm of the input
* @param x the input
* @return log<sub>e</sub>(x)
*/
public static double log(double x)
{
return logConst*log2(x);
}
/**
* Computes log<sub>2</sub>(x)
* @param x the input
* @return the log base 2 of {@code x}
*/
public static double log2(double x)
{
return log2_2pd1(x);
}
/**
* Computes log<sub>2</sub>(x) using a Pade approximation. It is slower than
     * {@link #log2_c11(double) } but does not use any extra memory. <br>
     * The results are generally accurate to an absolute and relative error of
     * 10<sup>-4</sup>, but the relative error can be as small as 10<sup>-10</sup>
* @param x the input
* @return the log base 2 of {@code x}
*/
public static double log2_2pd1(double x)
{
if(x < 0)
return Double.NaN;
long rawBits = doubleToLongBits(x);
long mantissa = getMantissa(rawBits);
int e = Math.getExponent(x);
double m = longBitsToDouble(1023L << 52 | mantissa);//m in [1, 2]
double log2m = 1.847320661499000 + 0.240449173481494 * m - 3.651821822250191 / (0.750000000000000 + m);
return log2m + e;
}
/**
* Returns the value floor(log<sub>2</sub>(x)) for an integer
* @param x the integer to get the floored logarithm of
* @return floor(log<sub>2</sub>(x))
*/
public static int floor_log2(int x)
{
return 31 - Integer.numberOfLeadingZeros(x);
}
/**
* Returns the value floor(log<sub>2</sub>(x)) for an integer
* @param x the integer to get the floored logarithm of
* @return floor(log<sub>2</sub>(x))
*/
public static int floor_log2(long x)
{
return 63 - Long.numberOfLeadingZeros(x);
}
static final double[] log2Cache11 = new double[1 << 11];
static
{
for(int i = 0; i < log2Cache11.length; i++)
{
long mantissa = i;
mantissa <<= (52-11);
log2Cache11[i] = Math.log(longBitsToDouble(1023L<<52 | mantissa))/Math.log(2);
}
}
/**
* Computes log<sub>2</sub>(x) using a cache of 2<sup>11</sup> values,
* consuming approximately 17 kilobytes of memory.<br>
* The results are generally accurate to an absolute and relative error of
* 10<sup>-4</sup>
* @param x the input
* @return the log base 2 of {@code x}
*/
public static double log2_c11(double x)
{
if(x < 0)
return Double.NaN;
long rawBits = doubleToLongBits(x);
long mantissa = getMantissa(rawBits);
int e = Math.getExponent(x);
return log2Cache11[(int)(mantissa >>> (52-11))] + e;
}
/**
     * Computes 2<sup>x</sup> exactly by exploiting the IEEE format
* @param x the integer power to raise 2 too
* @return 2<sup>x</sup>
*/
public static double pow2(int x)
{
if(x > Double.MAX_EXPONENT)
return Double.POSITIVE_INFINITY;
if(x < Double.MIN_EXPONENT)
return 0;
return longBitsToDouble((x+1023L)<<52);
}
/**
* Computes 2<sup>x</sup>.<br>
     * The results are generally accurate to a relative error of
* 10<sup>-4</sup>, but can be as accurate as 10<sup>-10</sup>
*
* @param x the power to raise to
     * @return 2<sup>x</sup>
*/
public static double pow2(double x)
{
if(x > Double.MAX_EXPONENT)
return Double.POSITIVE_INFINITY;
else if(x < Double.MIN_EXPONENT)
return 0;
else if(x < 0)
return 1.0/pow2(-x);
//x is positive at this point
double floorXd = Math.floor(x);
int floorX = (int) floorXd;
double frac = x-floorXd;
double pow2frac = -4.704682932438695+27.543765058113320/(4.828085122666891-frac)-0.490129071734273 * frac;
return pow2frac*longBitsToDouble((floorX+1023L)<<52);
}
/**
* Computes a<sup>b</sup>.<br>
*
*
* @param a the base
* @param b the power
* @return a<sup>b</sup>
*/
public static double pow(double a, double b)
{
/*
         * Write out a^b as 2^(b log2(a)) and then replace a with 'm 2^e' to get
* 2^(b * log2(m*2^e)) which simplifies to
* m^b 2^(b e) when m, e, and b are positive.
*
         * m & e are by IEEE definition positive
*/
if (b < 0)
return 1 / pow(a, -b);//b is now made positive
long rawBits_a = doubleToLongBits(a);
long mantissa_a = getMantissa(rawBits_a);
final int e_a = Math.getExponent(a);
//compute m^b and exploit the fact that we know there is no need for the exponent
double m = longBitsToDouble(1023L << 52 | mantissa_a);//m in [1, 2]
final double log2m = 1.790711564253215 + 0.248597253161674 * m - 3.495545043418375 / (0.714309275671154 + 1.000000000000000 * m);
//we end up with 2^(b * log_2(m)) * 2^(b * e), which we can reduce to a single pow2 call
return pow2(b * log2m + b * e_a);//fun fact, double*int is faster than casting an int to a double...
}
private static final double expPowConst = 1.0/Math.log(2);
/**
* Exponentiates the given input value
* @param x the input
* @return e<sup>x</sup>
*/
public static double exp(double x)
{
return pow2(expPowConst*x);
}
/**
* Computes the digamma function of the input
* @param x the input value
* @return ψ(x)
*/
public static double digamma(double x)
{
if(x == 0)
return Double.NaN;//complex infinity
else if(x < 0)//digamma(1-x) == digamma(x)+pi/tan(pi*x), to make x positive
{
if(Math.rint(x) == x)
return Double.NaN;//the zeros are complex infinity
return digamma(1-x)-PI/tan(PI*x);
}
/*
* shift over 2 values to the left and use truncated approximation
* log(x+2)-1/(2 (x+2))-1/(12 (x+2)^2) -1/x-1/(x+1),
         * the x+2 and x and x+1 terms are grouped separately
*/
double xp2 = x+2;
return log(xp2)-(6*x+13)/(12*xp2*xp2)-(2*x+1)/(x*x+x);
}
}
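/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only. It spot-checks the fast approximations against
 * java.lang.Math, illustrating the roughly 1e-4 relative error the class
 * documentation aims for.
 */
class FastMathExample
{
    public static void main(String[] args)
    {
        double x = 7.3;
        double relErr = Math.abs(FastMath.log(x) - Math.log(x)) / Math.log(x);
        System.out.println(relErr); //on the order of 1e-4 or better
        //pow2 on integer arguments is exact, since it only sets exponent bits
        System.out.println(FastMath.pow2(10) == 1024.0); //true
        double exact = Math.pow(2.5, 3.5);
        System.out.println(Math.abs(FastMath.pow(2.5, 3.5) - exact) / exact); //small, but not 0
    }
}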
| 7,567 | 29.764228 | 137 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/Function.java |
package jsat.math;
import java.io.Serializable;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
/**
* This functional interface defines a function over a vector input and returns
* a scalar output.
*
* @author Edward Raff
*/
public interface Function extends Serializable
{
/**
* Evaluates the given function for the specified input vector.
* @param x the input to the function
* @return the scalar output of this function
*/
default public double f(double... x)
{
return f(DenseVector.toDenseVec(x));
}
/**
* Evaluates the given function for the specified input vector.
* @param x the input to the function
* @return the scalar output of this function
*/
default public double f(Vec x)
{
return f(x, false);
}
/**
* Evaluates the given function for the specified input vector.
* @param x the input to the function
* @param parallel {@code true} if the function should be evaluated with
* multiple threads, or {@code false} to use a single thread.
* @return the scalar output of this function
*/
public double f(Vec x, boolean parallel);
/**
* Returns a new function that approximates the derivative of the given one
* via numerical forward difference approximation.
*
* @param f the function to approximate the derivative of
* @return a function that will return an estimate of the derivative
*/
public static FunctionVec forwardDifference(Function f)
{
FunctionVec fP = (Vec x, Vec s, boolean parallel) ->
{
if(s == null)
{
s = x.clone();
s.zeroOut();
}
double sqrtEps = Math.sqrt(2e-16);
double f_x = f.f(x, parallel);
Vec x_ph = x.clone();
for(int i = 0; i < x.length(); i++)
{
double h = Math.max(Math.abs(x.get(i))*sqrtEps, 1e-5);
x_ph.set(i, x.get(i)+h);
double f_xh = f.f(x_ph, parallel);
s.set(i, (f_xh-f_x)/h);//set derivative estimate
x_ph.set(i, x.get(i));
}
return s;
};
return fP;
}
}
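/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only. It defines f(x) = sum of x_i^2 with a lambda and
 * estimates its gradient with the forward difference helper; the true
 * gradient at (1, 2) is (2, 4).
 */
class FunctionExample
{
    public static void main(String[] args)
    {
        Function sumOfSquares = (x, parallel) ->
        {
            double s = 0;
            for (int i = 0; i < x.length(); i++)
                s += x.get(i) * x.get(i);
            return s;
        };
        FunctionVec grad = Function.forwardDifference(sumOfSquares);
        Vec g = grad.f(DenseVector.toDenseVec(1.0, 2.0));
        System.out.println(g.get(0)); //~2.0, up to finite difference error
        System.out.println(g.get(1)); //~4.0
    }
}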
| 2,335 | 27.487805 | 79 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/Function1D.java | /*
* Copyright (C) 2017 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.math;
/**
* This functional interface defines a one dimensional (single input) function.
*
* @author Edward Raff
*/
public interface Function1D
{
public double f(double x);
}
| 889 | 30.785714 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/FunctionMat.java | package jsat.math;
import java.util.concurrent.ExecutorService;
import jsat.linear.Matrix;
import jsat.linear.Vec;
/**
* Interface for representing a function that should return a Matrix object as
* the result.
*
* @author Edward Raff
*/
public interface FunctionMat
{
/**
* Computes a matrix based on multivariate input
* @param x the variables to evaluate as part of the function
* @return the matrix output of the function
*/
public Matrix f(double... x);
/**
* Computes a matrix based on multivariate input
* @param x the variables to evaluate as part of the function
* @return the matrix output of the function
*/
public Matrix f(Vec x);
/**
* Computes a matrix based on multivariate input
* @param x the variables to evaluate as part of the function
* @param s the matrix to store the result in, or {@code null} if a new
* matrix should be allocated
* @return the matrix containing the results. This is the same object as
* {@code s} if {@code s} is not {@code null}
*/
public Matrix f(Vec x, Matrix s);
/**
* Computes a matrix based on multivariate input
* @param x the variables to evaluate as part of the function
* @param s the matrix to store the result in, or {@code null} if a new
* matrix should be allocated
* @param ex the source of threads to use for the computation
* @return the matrix containing the results. This is the same object as
* {@code s} if {@code s} is not {@code null}
*/
public Matrix f(Vec x, Matrix s, ExecutorService ex);
}
| 1,636 | 31.74 | 79 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/FunctionVec.java | package jsat.math;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
/**
* Interface for representing a function that takes a vector as input should
* return a vector as the output.
*
* @author Edward Raff
*/
public interface FunctionVec
{
/**
* Computes the function value given the input {@code x}
* @param x the input to compute the output from
* @return the vector containing the results
*/
default public Vec f(double... x)
{
return f(DenseVector.toDenseVec(x));
}
/**
* Computes the function value given the input {@code x}
* @param x the input to compute the output from
* @return the vector containing the results
*/
default public Vec f(Vec x)
{
return f(x, null);
}
/**
* Computes the function value given the input {@code x}
* @param x the input to compute the output from
* @param s the vector to store the result in, or {@code null} if a new
* vector should be allocated
* @return the vector containing the results. This is the same object as
* {@code s} if {@code s} is not {@code null}
*/
default public Vec f(Vec x, Vec s)
{
return f(x, s, false);
}
/**
* Computes the function value given the input {@code x}
* @param x the input to compute the output from
* @param s the vector to store the result in, or {@code null} if a new
* vector should be allocated
* @param parallel {@code true} if multiple threads should be used for
* evaluation, {@code false} if only a single thread should.
* @return the vector containing the results. This is the same object as
     * {@code s} if {@code s} is not {@code null}
*/
public Vec f(Vec x, Vec s, boolean parallel);
}
| 1,805 | 29.610169 | 77 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/IndexFunction.java |
package jsat.math;
import jsat.linear.Vec;
/**
*
* @author Edward Raff
*/
public interface IndexFunction extends Function
{
/**
     * An index function, meant to be applied to vectors, where the
     * value to be computed may vary based on the position of the
     * value within the vector.
*
* @param value the value at the specified index
* @param index the index the value is from
     * @return the computed result. If a negative index was given, the
     * function should return 0.0 when indexFunc(0, index) would return
     * zero for all valid indices, and any non-zero value otherwise.
*
*/
abstract public double indexFunc(double value, int index);
@Override
default public double f(Vec x, boolean parallel)
{
return indexFunc(x.get(0), (int)x.get(1));
}
}
| 868 | 26.15625 | 106 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/MathTricks.java | package jsat.math;
import static java.lang.Math.*;
import jsat.linear.Vec;
/**
* This class provides utilities for performing specific arithmetic patterns in
* numerically stable / efficient ways.
*
* @author Edward Raff
*/
public class MathTricks
{
private MathTricks()
{
}
/**
*
* @param vals an array of values to get the maximum of
* @return the maximum of all given values
*/
public static double max(double... vals)
{
double m = Double.NEGATIVE_INFINITY;
for(double v : vals)
m = Math.max(v, m);
return m;
}
/**
*
     * @param vals an array of values to get the minimum of
     * @return the minimum of all given values
*/
public static double min(double... vals)
{
        double m = Double.POSITIVE_INFINITY;
for(double v : vals)
m = Math.min(v, m);
return m;
}
/**
     * Provides a numerically stable way to perform the log of a sum of
* exponentiations. The computed result is <br>
* log(<big>∑</big><sub> ∀ val ∈ vals</sub> exp(val) )
*
* @param vals the array of values to exponentiate and add
* @param maxValue the maximum value in the array
* @return the log of the sum of the exponentiated values
*/
public static double logSumExp(Vec vals, double maxValue)
{
double expSum = 0.0;
for(int i = 0; i < vals.length(); i++)
expSum += exp(vals.get(i)-maxValue);
return maxValue + log(expSum);
}
/**
     * Provides a numerically stable way to perform the log of a sum of
* exponentiations. The computed result is <br>
* log(<big>∑</big><sub> ∀ val ∈ vals</sub> exp(val) )
*
* @param vals the array of values to exponentiate and add
* @param maxValue the maximum value in the array
* @return the log of the sum of the exponentiated values
*/
public static double logSumExp(double[] vals, double maxValue)
{
double expSum = 0.0;
for(int i = 0; i < vals.length; i++)
expSum += exp(vals[i]-maxValue);
return maxValue + log(expSum);
}
/**
* Applies the softmax function to the given array of values, normalizing
* them so that each value is equal to<br><br>
* exp(x<sub>j</sub>) / Σ<sub>∀ i</sub> exp(x<sub>i</sub>)
*
* @param x the array of values
* @param implicitExtra {@code true} if the softmax will assume there is
* an extra implicit value not included in the array with a value of 0.0
*/
public static void softmax(double[] x, boolean implicitExtra)
{
double max = implicitExtra ? 1 : Double.NEGATIVE_INFINITY;
for(int i = 0; i < x.length; i++)
max = max(max, x[i]);
double z =implicitExtra ? exp(-max) : 0;
for (int c = 0; c < x.length; c++)
z += (x[c] = exp(x[c] - max));
for (int c = 0; c < x.length; c++)
x[c] /= z;
}
/**
* Applies the softmax function to the given array of values, normalizing
* them so that each value is equal to<br><br>
* exp(x<sub>j</sub>) / Σ<sub>∀ i</sub> exp(x<sub>i</sub>)<br>
* Note: If the input is sparse, this will end up destroying sparsity
*
* @param x the array of values
* @param implicitExtra {@code true} if the softmax will assume there is
* an extra implicit value not included in the array with a value of 0.0
*/
public static void softmax(Vec x, boolean implicitExtra)
{
double max = implicitExtra ? 1 : Double.NEGATIVE_INFINITY;
max = max(max, x.max());
double z =implicitExtra ? exp(-max) : 0;
for (int c = 0; c < x.length(); c++)
{
double newVal = exp(x.get(c) - max);
x.set(c, newVal);
z += newVal;
}
x.mutableDivide(z);
}
/**
* This evaluates a polynomial using Horner's method. It is assumed that the
* polynomial is stored in reverse order in the array {@code coef}, ie: from
* c<sub>n</sub> at index 0, and then decreasing.
*
* @param coef the polynomial with coefficients in reverse order
* @param x the value to evaluate the polynomial at
* @return the value of the polynomial at {@code x}
*/
public static double hornerPolyR(double[] coef, double x)
{
double result = 0;
for(double c : coef)
result = result*x+c;
return result;
}
/**
* This evaluates a polynomial using Horner's method. It is assumed that the
* polynomial is stored in order in the array {@code coef}, ie: from
* c<sub>0</sub> at index 0, and then increasing with the index.
*
* @param coef the polynomial with coefficients in reverse order
* @param x the value to evaluate the polynomial at
* @return the value of the polynomial at {@code x}
*/
public static double hornerPoly(double[] coef, double x)
{
double result = 0;
for(int i = coef.length-1; i >= 0; i--)
result = result*x + coef[i];
return result;
}
}
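/*
 * Usage sketch, not part of the original JSAT source: the demo class below is
 * illustrative only. It shows log-sum-exp staying finite where a naive
 * log(sum(exp(...))) would overflow, and the in-place softmax.
 */
class MathTricksExample
{
    public static void main(String[] args)
    {
        double[] vals = {1000, 1000.5, 999};
        //naive: exp(1000) overflows to infinity, so log of the sum is useless
        //stable: the max is subtracted first, so every exp() stays in range
        System.out.println(MathTricks.logSumExp(vals, 1000.5)); //~1001.104
        double[] scores = {2.0, 1.0, 0.0};
        MathTricks.softmax(scores, false); //normalized in place
        System.out.println(java.util.Arrays.toString(scores)); //~[0.665, 0.245, 0.090]
    }
}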
| 5,304 | 31.347561 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/OnLineStatistics.java |
package jsat.math;
import java.io.Serializable;
import java.util.function.BinaryOperator;
/**
*
* This class provides a means of updating summary statistics as each
* new data point is added. The data points are not stored, and values
* are updated with an online algorithm.
* <br>
* As such, this class has constant memory usage, regardless of how many
* values are added. But the results may not be as numerically accurate,
* and can degrade badly given specific data sequences.
*
* @author Edward Raff
*/
public class OnLineStatistics implements Serializable, Cloneable, BinaryOperator<OnLineStatistics>
{
private static final long serialVersionUID = -4286295481362462983L;
/**
* The current mean
*/
private double mean;
/**
* The current number of samples seen
*/
private double n;
    //Intermediate values updated at each step; the variance is computed from them
private double m2, m3, m4;
private Double min, max;
/**
* Creates a new set of statistical counts with no information
*/
public OnLineStatistics()
{
this(0, 0, 0, 0, 0);
}
/**
* Creates a new set of statistical counts with these initial values, and can then be updated in an online fashion
     * @param n the total weight of all data points added. This value must be non-negative
* @param mean the starting mean. If <tt>n</tt> is zero, this value will be ignored.
* @param variance the starting variance. If <tt>n</tt> is zero, this value will be ignored.
* @param skew the starting skewness. If <tt>n</tt> is zero, this value will be ignored.
* @param kurt the starting kurtosis. If <tt>n</tt> is zero, this value will be ignored.
* @throws ArithmeticException if <tt>n</tt> is a negative number
*/
public OnLineStatistics(double n, double mean, double variance, double skew, double kurt)
{
if(n < 0)
throw new ArithmeticException("Can not have a negative set of weights");
this.n = n;
if(n != 0)
{
this.mean = mean;
this.m2 = variance*(n-1);
this.m3 = Math.pow(m2, 3.0/2.0)*skew/Math.sqrt(n);
this.m4 = (3+kurt)*m2*m2/n;
}
else
this.mean = m2 = m3 = m4 = 0;
min = max = null;
}
private OnLineStatistics(double n, double mean, double m2, double m3, double m4, Double min, Double max)
{
this.n = n;
this.mean = mean;
this.m2 = m2;
this.m3 = m3;
this.m4 = m4;
this.min = min;
this.max = max;
}
/**
* Copy Constructor
* @param other the version to make a copy of
*/
public OnLineStatistics(OnLineStatistics other)
{
this(other.n, other.mean, other.m2, other.m3, other.m4,
other.min, other.max);
}
/**
* Adds a data sample with unit weight to the counts.
* @param x the data value to add
*/
public void add(double x)
{
add(x, 1.0);
}
/**
     * Adds a data sample to the counts with the provided weight of influence.
* @param x the data value to add
* @param weight the weight to give the value
* @throws ArithmeticException if a negative weight is given
*/
public void add(double x, double weight)
{
//See http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
if(weight < 0)
throw new ArithmeticException("Can not add a negative weight");
else if(weight == 0)
return;
double n1 = n;
n+=weight;
double delta = x - mean;
double delta_n = delta*weight/n;
double delta_n2 = delta_n*delta_n;
double term1 = delta*delta_n*n1;
mean += delta_n;
m4 += term1 * delta_n2 * (n*n - 3*n + 3) + 6 * delta_n2 * m2 - 4 * delta_n * m3;
m3 += term1 * delta_n * (n - 2) - 3 * delta_n * m2;
m2 += weight*delta*(x-mean);
if(min == null)
min = max = x;
else
{
min = Math.min(min, x);
max = Math.max(max, x);
}
}
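    /*
     * Usage sketch (illustrative, not part of the original source): values
     * are folded in one at a time, so the full data set never needs to be
     * held in memory:
     *
     *   OnLineStatistics stats = new OnLineStatistics();
     *   for(double x : new double[]{2, 4, 4, 4, 5, 5, 7, 9})
     *       stats.add(x);
     *   stats.getMean(); //5.0
     *   stats.getVarance();//4.0, the population variance
     */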
/**
* Effectively removes a sample with the given value and weight from the total.
* Removing values that have not been added may yield results that have no meaning
* <br><br>
* NOTE: {@link #getSkewness() } and {@link #getKurtosis() } are not currently updated correctly
*
* @param x the value of the sample
* @param weight the weight of the sample
* @throws ArithmeticException if a negative weight is given
*/
public void remove(double x, double weight)
{
if(weight < 0)
throw new ArithmeticException("Can not remove a negative weight");
else if(weight == 0)
return;
double n1 = n;
n-=weight;
double delta = x - mean;
double delta_n = delta*weight/n;
double delta_n2 = delta_n*delta_n;
double term1 = delta*delta_n*n1;
mean -= delta_n;
m2 -= weight*delta*(x-mean);
//TODO m3 and m4 arent getting updated correctly
m3 -= term1 * delta_n * (n - 2+weight) - 3 * delta_n * m2;
m4 -= term1 * delta_n2 * (n*n - 3*n + 3) + 6 * delta_n2 * m2 - 4 * delta_n * m3;
}
/**
* Computes a new set of statistics that is the equivalent of having removed
* all observations in {@code B} from {@code A}. <br>
* NOTE: removing statistics is not as numerically stable. The values of the
* 3rd and 4th moments {@link #getSkewness() } and {@link #getKurtosis() }
* will be inaccurate for many inputs. The {@link #getMin() min} and
* {@link #getMax() max} can not be determined in this setting, and will not
* be altered.
* @param A the first set of statistics, which must have a larger value for
* {@link #getSumOfWeights() } than {@code B}
* @param B the set of statistics to remove from {@code A}.
* @return a new set of statistics that is the removal of {@code B} from
* {@code A}
*/
public static OnLineStatistics remove(OnLineStatistics A, OnLineStatistics B)
{
OnLineStatistics toRet = A.clone();
toRet.remove(B);
return toRet;
}
/**
     * Removes from this set of statistics the observations that were collected
* in {@code B}.<br>
* NOTE: removing statistics is not as numerically stable. The values of the
* 3rd and 4th moments {@link #getSkewness() } and {@link #getKurtosis() }
* will be inaccurate for many inputs. The {@link #getMin() min} and
* {@link #getMax() max} can not be determined in this setting, and will not
* be altered.
* @param B the set of statistics to remove
*/
public void remove(OnLineStatistics B)
{
final OnLineStatistics A = this;
//XXX double compare.
if(A.n == B.n)
{
n = mean = m2 = m3 = m4 = 0;
min = max = null;
return;
}
else if(B.n == 0)
return;//removed nothing!
else if(A.n < B.n)
throw new ArithmeticException("Can not have negative samples");
double nX = A.n-B.n;
double nXsqrd = nX*nX;
double nAnB = B.n*A.n;
double AnSqrd = A.n*A.n;
double BnSqrd = B.n*B.n;
double delta = B.mean - A.mean;
double deltaSqrd = delta*delta;
double deltaCbd = deltaSqrd*delta;
double deltaQad = deltaSqrd*deltaSqrd;
double newMean = (A.n* A.mean - B.n * B.mean)/(A.n - B.n);
double newM2 = A.m2 - B.m2 - deltaSqrd / nX *nAnB;
double newM3 = A.m3 - B.m3 - deltaCbd* nAnB*(A.n - B.n) / nXsqrd - 3 * delta * (A.n * B.m2 - B.n * A.m2)/nX;
double newM4 = A.m4 - B.m4
- deltaQad * (nAnB*(AnSqrd - nAnB + BnSqrd)/(nXsqrd*nX))
- 6 * deltaSqrd*(AnSqrd*B.m2 - BnSqrd*A.m2)/nXsqrd
- 4 * delta *(A.n*B.m3 - B.n*A.m3)/nX;
this.n = nX;
this.mean = newMean;
this.m2 = newM2;
this.m3 = newM3;
this.m4 = newM4;
}
/**
* Computes a new set of counts that is the sum of the counts from the given distributions.
* <br><br>
* NOTE: Adding two statistics is not as numerically stable. If A and B have values of similar
* size and scale, the values of the 3rd and 4th moments {@link #getSkewness() } and
* {@link #getKurtosis() } will suffer from catastrophic cancellations, and may not
* be as accurate.
* @param A the first set of statistics
* @param B the second set of statistics
* @return a new set of statistics that is the addition of the two.
*/
public static OnLineStatistics add(OnLineStatistics A, OnLineStatistics B)
{
OnLineStatistics toRet = A.clone();
toRet.add(B);
return toRet;
}
/**
* Adds to the current statistics all the samples that were collected in
* {@code B}. <br>
* NOTE: Adding two statistics is not as numerically stable. If A and B have values of similar
* size and scale, the values of the 3rd and 4th moments {@link #getSkewness() } and
* {@link #getKurtosis() } will suffer from catastrophic cancellations, and may not
* be as accurate.
* @param B the set of statistics to add to this set
*/
public void add(OnLineStatistics B)
{
final OnLineStatistics A = this;
//XXX double compare.
        if(B.n == 0)
            return;//nothing to add!
else if (A.n == 0)
{
this.n = B.n;
this.mean = B.mean;
this.m2 = B.m2;
this.m3 = B.m3;
this.m4 = B.m4;
this.min = B.min;
this.max = B.max;
return;
}
double nX = B.n + A.n;
double nXsqrd = nX*nX;
double nAnB = B.n*A.n;
double AnSqrd = A.n*A.n;
double BnSqrd = B.n*B.n;
double delta = B.mean - A.mean;
double deltaSqrd = delta*delta;
double deltaCbd = deltaSqrd*delta;
double deltaQad = deltaSqrd*deltaSqrd;
double newMean = (A.n* A.mean + B.n * B.mean)/(A.n + B.n);
double newM2 = A.m2 + B.m2 + deltaSqrd / nX *nAnB;
double newM3 = A.m3 + B.m3 + deltaCbd* nAnB*(A.n - B.n) / nXsqrd + 3 * delta * (A.n * B.m2 - B.n * A.m2)/nX;
double newM4 = A.m4 + B.m4
+ deltaQad * (nAnB*(AnSqrd - nAnB + BnSqrd)/(nXsqrd*nX))
+ 6 * deltaSqrd*(AnSqrd*B.m2 + BnSqrd*A.m2)/nXsqrd
+ 4 * delta *(A.n*B.m3 - B.n*A.m3)/nX;
this.n = nX;
this.mean = newMean;
this.m2 = newM2;
this.m3 = newM3;
this.m4 = newM4;
this.min = Math.min(A.min, B.min);
this.max = Math.max(A.max, B.max);
}
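    /*
     * Usage sketch (illustrative, not part of the original source): because
     * two summaries can be merged, each thread or data partition may keep its
     * own OnLineStatistics and combine them at the end:
     *
     *   OnLineStatistics partA = new OnLineStatistics();
     *   OnLineStatistics partB = new OnLineStatistics();
     *   //... each part calls add(x) on its share of the data ...
     *   OnLineStatistics total = OnLineStatistics.add(partA, partB);
     */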
@Override
public OnLineStatistics clone()
{
return new OnLineStatistics(n, mean, m2, m3, m4, min, max);
}
/**
* Returns the sum of the weights for all data points added to the statistics.
* If all weights were 1, then this value is the number of data points added.
* @return the sum of weights for every point currently contained in the
* statistics.
*/
public double getSumOfWeights()
{
return n;
}
public double getMean()
{
return mean;
}
/**
* Computes the population variance
* @return the variance of the data seen
*/
public double getVarance()
{
        return m2/(n+1e-15);//USED to be the unbiased est, but doesn't work for weighted data when the weights may be <= 1. So use the biased version.
}
public double getStandardDeviation()
{
return Math.sqrt(getVarance());
}
public double getSkewness()
{
return Math.sqrt(n) * m3 / Math.pow(m2, 3.0/2.0);
}
public double getKurtosis()
{
return (n*m4) / (m2*m2) - 3;
}
public double getMin()
{
return min;
}
public double getMax()
{
return max;
}
@Override
public OnLineStatistics apply(OnLineStatistics t, OnLineStatistics u)
{
if(t == null)
return u;
else if( u == null)
return t;
        //else, both are non-null
return OnLineStatistics.add(t, u);
}
}
| 12,356 | 31.179688 | 133 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/SimpleLinearRegression.java |
package jsat.math;
import jsat.linear.Vec;
/**
*
* @author Edward Raff
*/
public class SimpleLinearRegression
{
/**
* Performs a Simple Linear Regression on the data set, calculating the best fit a and b such that y = a + b * x <br><br>
*
     * @param xData the X data set (the predictor)
     * @param yData the Y data set (to be predicted)
* @return an array containing the a and b, such that index 0 contains a and index 1 contains b
*/
static public double[] regres(Vec xData, Vec yData)
{
//find y = a + B *x
double[] toReturn = new double[2];
//B value
toReturn[1] = DescriptiveStatistics.sampleCorCoeff(xData, yData)*yData.standardDeviation()/xData.standardDeviation();
//a value
toReturn[0] = yData.mean() - toReturn[1]*xData.mean();
return toReturn;
}
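    /*
     * Usage sketch (illustrative, not part of the original source), using
     * hypothetical data where y = 1 + 2 x exactly:
     *
     *   Vec x = new jsat.linear.DenseVector(new double[]{1, 2, 3, 4});
     *   Vec y = new jsat.linear.DenseVector(new double[]{3, 5, 7, 9});
     *   double[] ab = regres(x, y);
     *   //ab[0] ~= 1.0 (intercept a), ab[1] ~= 2.0 (slope b)
     */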
}
| 864 | 26.03125 | 125 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/SpecialMath.java |
package jsat.math;
import jsat.distributions.Normal;
import jsat.linear.Vec;
import jsat.math.rootfinding.RiddersMethod;
import static java.lang.Math.*;
import static jsat.math.MathTricks.*;
/**
* This class provides static methods for computing accurate approximations to
* many special functions. <br>
* <br>
* All methods should return absolute differences of less than 10<sup>-9</sup>
* for all reasonable values. Unreasonable values would be those in areas of
* high change (such as those approaching positive / negative infinity).
*
* @author Edward Raff
*/
public class SpecialMath
{
public static final double EULER_MASCHERONI = 0.57721566490153286061;
/*
* TODO this class needs more documentation. All these methods should have
* indications of their expected accuracies.
*/
public static double invXlnX(double y)
{
//Method from Numerical Recipies, 3rd edition
if(y >= 0 || y <= -exp(-1))
throw new ArithmeticException("Inverse value can not be computed for the range [-e^-1, 0]");
double u;
if(y < -0.2)
u = log(exp(-1) - sqrt(2*exp(-1)* (y+exp(-1)) ) );
else
u = 10;
double previousT = 0, t;
do
{
t = (log(y/u)-u)*(u/(1+u));
u += t;
if (t < 1e-8 && abs(t + previousT) < 0.01 * abs(t))
break;
previousT = t;
}
        while (abs(t / u) > 1e-15);//iterate while the correction is still significant
return exp(u);
}
/**
* The gamma function is a generalization of the factorial function.
* This method provides the gamma function for values from -Infinity to Infinity.
* <br>Special Values:
* <ul>
* <li>{@link Double#NaN} returned if z = 0 or z = {@link Double#NEGATIVE_INFINITY}</li>
* </ul>
* @param z any real value
* @return Γ(z)
*/
public static double gamma(double z)
{
        if(z == 0)//the true value is infinity*i, where i = sqrt(-1)
return Double.NaN;
else if(z == Double.POSITIVE_INFINITY)
return z;
else if(z < 0)
{
            /*
             * Using the reflection identity
             *
             *   gamma(-z) = -pi / (z * gamma(z) * sin(pi * z))
             */
z = -z;
return -PI/(z*gamma(z)*sin(PI*z));
}
/**
* General case for z > 0, from Numerical Recipes in C (2nd ed. Cambridge University Press, 1992). |error| is <= 2*10^-10 for all z > 0
*
* ____ / 6 \
* / __ | ===== p |
* \/ 2 || | \ n | z + 0.5 - (z + 5.5)
* gamma(z) = ------- |p + > -----| (z + 5.5) e
* z | 0 / z + n|
* | ===== |
* \ n = 1 /
*
*
* see http://www.rskey.org/gamma.htm
*
*/
double p[] =
{
1.000000000190015,76.18009172947146,-86.50532032941677,
24.01409824083091,-1.231739572450155,1.208650973866179e-3,-5.395239384953e-6
};
double innerLoop = 0;
for(int n = 1; n < p.length; n++)
innerLoop += p[n]/(z+n);
double result = p[0] + innerLoop;
result *= sqrt(2*PI)/z;
return result*pow(z+5.5, z+0.5)*exp(-(z+5.5));
}
/**
* Computes the natural logarithm of {@link #gamma(double) }.
* This method is more numerically stable than taking the log of the result of Γ(z).
*
* <br>Special Values:
* <ul>
* <li>{@link Double#POSITIVE_INFINITY} returned if z ≤ 0</li>
* <li>{@link Double#NaN} returned if z = {@link Double#NEGATIVE_INFINITY}</li>
* </ul>
*
* @param z any real number value
* @return Log(Γ(z))
*/
public static double lnGamma(double z)
{
if(z == Double.NEGATIVE_INFINITY)
return Double.NaN;
else if(z == Double.POSITIVE_INFINITY)
return z;
else if(z <= 0)
return Double.POSITIVE_INFINITY;
/*
* Lanczos approximation for the log of the gamma function, with |error| < 10^-15 for all z > 0 (Almost full double precision)
*/
int j;
double x, tmp, y, ser = 0.999999999999997092;
double[] c = new double[]
{
57.1562356658629235, -59.5979603554754912,
14.1360979747417471, -0.491913816097620199, .339946499848118887e-4,
.465236289270485756e-4, -.983744753048795646e-4, .158088703224912494e-3,
-.210264441724104883e-3, .217439618115212643e-3, -.164318106536763890e-3,
.844182239838527433e-4, -.261908384015814087e-4, .368991826595316234e-5
};
y = x = z;
tmp = x+671.0/128.0;
tmp = (x+0.5)*log(tmp)-tmp;
for (j = 0; j < 14; j++)
{
y++;
ser += c[j] / y;
}
return tmp+log(2.5066282746310005*ser/x);
}
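    /*
     * Usage sketch (illustrative, not part of the original source): gamma
     * overflows a double quickly, so products and ratios of gamma values
     * should be formed in log space, e.g. the binomial coefficient C(300, 150):
     *
     *   //gamma(301) alone overflows to Infinity, but the log form is fine
     *   double lnChoose = lnGamma(301) - lnGamma(151) - lnGamma(151);
     *   double choose = Math.exp(lnChoose);//~9.4e88
     */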
/**
* Positive zero of the digamma function
*/
static final private double digammaPosZero = 1.461632144968362341;
/**
* Computes the value of the digamma function, Ψ(x), which is the
* derivative of {@link #lnGamma(double) }. <br>
* <br>
* This method may return:<br>
* <ul>
* <li> {@link Double#NaN} for zero and negative integer values (would be complex infinity)</li>
* </ul>
* <br>
* This method should be accurate to an absolute difference of
* 10<sup>-14</sup> for all values that are not near an asymptote.
*
* @param x the value to compute the digamma function at
* @return the value of Ψ(x)
*/
public static double digamma(double x)
{
if(x == 0)
return Double.NaN;//complex infinity
else if(x < 0)//digamma(1-x) == digamma(x)+pi/tan(pi*x), to make x positive
{
if(Math.rint(x) == x)
return Double.NaN;//the zeros are complex infinity
return digamma(1-x)-PI/tan(PI*x);
}
else if(x < 2)//shift the value into [2, Inf]
return digamma(x+1) - 1/x;
        double approx = log(x);//rel error of 10^-11 for x >= 100, 10^-13 for x >= 250, near machine precision at x >= 500
double approxS = -1/(2*x);
double approxSS = -1/(12*x*x);
if(x <= 7)
return (x-digammaPosZero)*hornerPolyR(digamma_p_2_7, x)/hornerPolyR(digamma_q_2_7, x);
else if(x <= 70)
return approx + (approxS + (approxSS + hornerPolyR(digamma_p_7_70, x)/hornerPolyR(digamma_q_7_70, x)));
if(x < 500)
return approx + (approxS + (approxSS + hornerPolyR(digamma_adj_p, x)/hornerPolyR(digamma_adj_q, x)));
else
return approx;
}
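    /*
     * Sanity checks (illustrative, not part of the original source): two well
     * known values of the digamma function,
     *
     *   digamma(1); //~= -EULER_MASCHERONI = -0.57721...
     *   digamma(0.5);//~= -EULER_MASCHERONI - 2 log(2) = -1.96351...
     */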
/**
* The upper polynomial for adjustment to the digamma function approximation
* in the range [5, 500]. Computationally the absolute difference is less
     * than 1e-14 for x > 70
*/
private static final double[] digamma_adj_p = new double[]
{
6.662015538739419312162593679911046099212e-17, 8.191636265707720257639745288823408068504e-14,
3.594113658754209591796105198667912207196e-11, -6.860373206319231677418396622671250368996e-9,
0.05718773229071206996201131377297202073545
};
/**
* The lower polynomial for adjustment to the digamma function approximation
* in the range [5, 500]
*/
private static final double[] digamma_adj_q = new double[]
{
6.862596697230474833874884670160991920179, 0.002471283005727189026784415544975350355939,
3.307589698304449008222736059167667287747, 0.2934987180435057544393612182922529976682,
1.0
};
/**
* The upper polynomial for computing the digamma function, fit against
* psi(x)/(x-1.46163...)
*
* Absolute difference less than 1e-15 for the range [2,7]
*
* Idea from Chebyshev Approximations for the Psi Function. Mathematics of
* Computation, Volume 27, Number 121. By Cody, Strecok, and Thacher,
*/
private static final double[] digamma_p_2_7 = new double[]
{
0.00803356767428942100, 8.71902391724677187,
445.627457353132455, 5678.99715950204957,
23638.5586690114249, 32569.4896509749708,
11137.2906503774953
};
/**
* The lower polynomial for computing the digamma function, fit against
* psi(x)/(x-1.46163...)
*
* Absolute difference less than 1e-15 for the range [2,7]
*/
private static final double[] digamma_q_2_7 = new double[]
{
1.63429827694094891, 123.543728215902577,
2208.64039265513217, 13061.4741667968999,
27097.1711564673534, 16271.6188328948966,
1.0
};
/**
* The upper polynomial for approximating the digamma function
* in the range [7, 70]. The coefficients are in reverse order
*
* Absolute difference is less than 1e-14 for the whole range
* (1e-15 for most of it!)
*/
private static final double[] digamma_p_7_70 = new double[]
{
1.184517214829426509228398e-14, -3.115254117966573324590434e-12,
3.376965291465973686259273e-10, -1.965130493876512169028824e-8,
-0.04505398537169693060781875
};
/**
* The lower polynomial for approximating the digamma function in the range
* [7, 70]. The coefficients are in reverse order
*/
private static final double[] digamma_q_7_70 = new double[]
{
-5.406558865634368930280970, 0.001668808697639819914012839,
-2.595288371769254271653798, 0.1463978383403391904410391,
1
};
/**
* Computes the Riemann zeta function ζ(x) for some value of x<br>
* <br>
* This method may return:<br>
* <ul>
* <li> {@link Double#NaN} for x = 1 (would be complex infinity)</li>
* </ul>
* <br>
* <br>
* <b>NOTE:</b> This method is not yet complete in terms of accuracy. <br>
* <ul>
* <li>For x < -0.5, the values returned will be of the correct
* magnitude - but are not very accurate</li>
     * <li>For x in [-0.5, 2.5], the values returned will be reasonably
     * accurate (absolute difference around 10<sup>-7</sup>), unless it is very close to 1</li>
     * <li>For x > 2.5, the result will be very accurate (absolute difference less than 10<sup>-14</sup>)</li>
* </ul>
* @param x a real valued input
* @return ζ(x)
*/
public static double zeta(double x)
{
if(x == 1)
return Double.NaN;
if(x == 0)
return -0.5;
if(x < 0 || abs(1-x) <= 0.2)
{
if(x <= 0.2 && x > -2.)
{
/*
* For this specific range we keep our own approximant,
* see below comment
*/
return hornerPolyR(zeta_p_special, x)/hornerPolyR(zeta_q_special, x);
}
/*
* http://dlmf.nist.gov/25.4#E2
*
* Reflect zeta across 1 if negative to make it positive.
*
* Reflect zeta across 1 if it is just less than one so that it will
* be just more than 1 (much easier to compute)
*
* Reflect if just more than 1 to an area that is easier to approximate
*/
double otherPart = 2*pow(2*PI, x-1)*sin(PI/2*x);
if(x < 0)
                return otherPart*exp(lnGamma(1-x)+log(zeta(1-x)));//gamma(1-x)*zeta(1-x), formed in log space to avoid overflow
else//log(zeta(1-x)) would have caused a NaN
return otherPart*gamma(1-x)*zeta(1-x);
}
if(x < 14)
return hornerPolyR(zeta_p_l14, x)/hornerPolyR(zeta_q_l14, x);
if(x < 50)
{
//use truncated form of http://dlmf.nist.gov/25.2#E3
double mul = 1/(1-pow(2, 1-x));
double sumP = 0;
double sumN = 0;
for(int i = 11; i >= 1; i-=2)//all odd values are positive
sumP += pow(i, -x);
for(int i = 10; i >= 1; i-=2)//all even values are negative
sumN -= pow(i, -x);
return mul*(sumP+sumN);
}
//else x>=50, 1 is so close we might as well use it
return 1;
}
/**
* Coefficients used for computing the zeta function involving the coef[k_]:=BernoulliB[2 k]/Factorial[2 k] term
*/
private static final double[] zetBernCoefs = new double[]
{
0.0,//FAKE VALUE, Never used
0.083333333333333333333,-0.0013888888888888888889,0.000033068783068783068783,
-8.2671957671957671958e-7,2.0876756987868098979e-8,-5.2841901386874931848e-10,
1.3382536530684678833e-11,-3.3896802963225828668e-13,8.5860620562778445641e-15,
-2.1748686985580618730e-16
};
/**
* Implements the Hurwitz zeta function ζ(x, a) <br>
* The relative error of this implementation is less than 1e-4 for most
* positive values of x, though is lower for negative values of x. Accuracy
* improves as a increases.
*
* @param x the first argument of the Hurwitz zeta function/
* @param a the second argument, must be positive in all cases.
* @return the value of ζ(x, a), or {@link Double#NaN} if the result
* would be a Complex Infinity
*/
public static double zeta(double x, double a)
{
if(x == 0)
return 0.5-a;
if(a <= 0)
return Double.NaN;//Results would be complex infinity
if(x == 1)
return Double.NaN;//Result would be complex infinity
if(a > 1e7 || (x < 0 && x >=-100 && a >= 1e3))
{
//In the limit this is correct. At 1e6, relative error is <= 10^(-8) for x [1, 10,000]
//Its also good for x in [-100, 1] - so use in this case for small q values of 1e3
return (1/(x - 1) + 1/(2*a))*pow(a, 1 - x);
}
if(x < 0)
{
if(a <= 1)//Reflection using https://dlmf.nist.gov/25.11#E9
{
double s = 1-x;
double sum = 0;
for(int n = 1; n <= 20; n++)
sum += pow(n, -s) * cos(PI*0.5*s-2*n*PI*a);
return exp(log(2) + lnGamma(s) - s*log(2*PI))*sum;
}
else //reduce a using https://dlmf.nist.gov/25.11.E4
{
double m = Math.floor(a);
if(m == a)//a was an integer, so lets adjust m by 1
m--;
//Now a_new will be in (0, 1]
double a_new = a-m;
double sum = 0;
for(int n = (int) (m-1); n >= 0; n--)
{
double t = pow(n+a_new, -x);
sum += t;
if (t / sum < 1e-6)//eventually our contributions will be tiny, break when it happens
break;
}
return zeta(x, a_new) - sum;
}
}
//The error on this seems high... so lets not do that for now
// if(x <= -1 && Math.rint(x) == x && x >= Integer.MIN_VALUE)//Use https://dlmf.nist.gov/25.11.E13
// {
// int n = (int) -x;
// return -exp(reLnBn(n+1)+log(a)-log(n+1));
// }
//General case, done as outlined here https://math.stackexchange.com/questions/917100/numerical-evaluation-of-hurwitz-zeta-function?rq=1
double part1 = 0;
double part2 = 0;
        final int n = 9;//Constant chosen by recommendation for double precision
for(int k = 0; k <= n; k++)
{
part1 += 1/pow(a+k, x);
}
        //Second summand term is zetBernCoefs[k] * ( ((a+n)^(-k-x) Gamma[-2+3 k+x])/Gamma[-2+2 k+x] )
        //simplify the 2nd term with logs as exp( -(k+x) Log[a+n]+Log[Gamma[-2+3 k+x]]-Log[Gamma[-2+2 k+x]] )
for(int k = 1; k < zetBernCoefs.length; k++ )
{
part2 += zetBernCoefs[k] * exp(-(k+x)* log(a+n)+lnGamma(-2+3*k+x)-lnGamma(-2+2*k+x));
}
return part1 + pow(a+n, 1-x)/(x-1) - 1./(2*pow(a+n, x)) + part2 ;
}
/**
* Computes the n'th harmonic number <i>H<sub>n</sub></i>. <br>
* Note that the relative error of this method is less than 10<sup>-6</sup>
* for the entire range, and decreases as <i>n</i> increases.
*
* @param n any non-negative value
     * @return the value of <i>H<sub>n</sub></i>, or {@link Double#NaN} if <i>n</i> < 0
*/
public static double harmonic(double n)
{
if(n == 0)
return 0;
else if(n < 0)
return Double.NaN;
if(n < 2)//Lets use Economized Rational Approximations
{
final double[] era_up_4 = new double[]
{
0.00067701604350, 5483.1926096, 4283.42774099, 602.59193727
};
final double[] era_low_5 = new double[]
{
3333.39376261, 5039.82176406, 1856.44874041, 140.548664241, -1.000000000000
};
return hornerPoly(era_up_4, n)/hornerPoly(era_low_5, n);
}
//else, this
double h_n = log(n) + EULER_MASCHERONI + 1./(2*n);
if(n >= 1000000)
return h_n;//Close enough with rel error < 5e-15
//We are going to add at least two terms then
//1/(240 n^8)-1/(252 n^6)+1/(120 n^4)-1/(12 n^2)
double nSqrd = n*n;
double nQuad = nSqrd*nSqrd;
h_n += -1/(12 * nSqrd) + 1/(120 * nQuad);
double n8 = nQuad*nQuad;
double n6 = nSqrd*nQuad;
h_n += +(1/(240*n8))-1/(252*n6);
return h_n;
}
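    /*
     * Sanity checks (illustrative, not part of the original source): H_n
     * approaches log(n) + EULER_MASCHERONI as n grows,
     *
     *   harmonic(4); //1 + 1/2 + 1/3 + 1/4 = 2.08333...
     *   harmonic(1e6);//~= log(1e6) + EULER_MASCHERONI = 14.39272...
     */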
/**
* Computes the generalized n'th harmonic number of the m'th order
* <i>H<sub>n</sub><sup>(m)</sup></i>. <br>
*
* @param n any non-negative value
* @param m the harmonic order
* @return the value of <i>H<sub>n</sub><sup>(m)</sup></i>
*/
public static double harmonic(double n, double m)
{
if (n == 0)
return 0;
else if (n < 0)
if (m == 0)
return n;
else
return Double.NaN;//truth is complex infinity
else if(m == 1)
return harmonic(n);
return zeta(m)-zeta(m,n+1);
}
/**
* upper polynomial approximation of the zeta function between [-0.20, 0.5]
*/
private static double[] zeta_p_special =
{
-5.276454584406249e-6, 0.00014685004463733906,
-0.0029925134974932046, -0.03542393126377964,
-0.2062582384669163, -0.5425801056911627,
-0.500000000000001
};
/**
* Lower polynomial approximation of the zeta function between [-0.20, 0.5]
*/
private static double[] zeta_q_special =
{
-3.9917203368386765e-6, -0.00017067854372054898,
-0.0029262260848543224, -0.03374331488845255,
-0.21043893362112356, -0.75271685502711,
1
};
/**
* Upper polynomial approximation of the zeta function between [1.20, 14]
*/
private static double[] zeta_p_l14 =
{
-8.60637125178308808e-8, -1.15605577219727645e-6,
-0.0000431166632155845063, -0.000526489508370994743,
-0.00681551580000143337, -0.0630725028974146228,
-0.294618454483912025, -0.634306044238860948,
-0.500000284011974157
};
/**
* Lower polynomial approximation of the zeta function between [1.20, 14]
*/
private static double[] zeta_q_l14 =
{
-8.61872960765516585e-8, -1.13850129664146526e-6,
-0.0000442195744615244028, -0.000486888035876849659,
-0.00768589078947058848, -0.0516310207074782699,
-0.370887845301558816, -0.569262911426117336,
1
};
/**
* Contains the exact values for Re[Log[Bn[x]]] for all even values less than 50
*/
private static final double[] reLnBn_sub_50 =
{
0,
-1.79175946922805500, -3.40119738166215538, -3.73766961828336831,
-3.40119738166215538, -2.58021682959232517, -1.37391706441133552,
0.154150679827258304, 1.95898950623372649, 4.00680901051075934,
6.27122326708809952, 8.73103330984004285, 11.3688270444561602,
14.1700452298205194, 17.1223324620051408, 20.2150715380410726,
23.4390405118087582, 26.7861544648980854, 30.2492673319812623,
33.8220172715222676, 37.4987042389444309, 41.2741917911127339,
45.1438274079056960, 49.1033771613463772, 53.1489716411456250
};
/**
* Computes the real part of the natural logarithm of the Bernoulli numbers.
* <br> The Bernoulli zeros for odd n will return
* {@link Double#NEGATIVE_INFINITY} and for any value less than 0 will
* return {@link Double#NaN}.
     * <br><br>
* Currently only accurate to an absolute difference of 10<sup>-11</sup>
*
* @param n the integer Bernoulli value to obtain an approximation of
* @return <i>Re(Log(B<sub>n</sub>))</i>
*/
public static double reLnBn(int n)
{
if(n < 0)
return Double.NaN;
if(n == 1)
return -log(2);
if(n % 2 != 0)
return Double.NEGATIVE_INFINITY;
if(n >= 50)//rel err < 1e-14
{
//Log[-2^(3/2 - n) ((3 i)/e)^n n^(1/2 + n) ((3 + 40 n^2)/(-1 + 120 n^2))^n Pi^(1/2 - n)]
double x = 0;
x += (3.0/2.0-n)*log(2);//ignoring + imaginary pi here
x += n*(log(3)-1);//ignoring + imaginary pi/2 in the parenthesis
x += (n+0.5)*log(n);
x += n*(log(40*n*n+3)-log(120*n*n-1));
x += (0.5-n)*log(PI);
return x;
}
return reLnBn_sub_50[n/2];
}
/**
* Computes an approximation to the n'th Bernoulli number
* <i>B<sub>n</sub></i>.
* The Bernoulli numbers grow in value rapidly, and so the accuracy of this
* method decays quickly. n > 20 should have the correct order of magnitude,
* but may not have many significant figures. {@link #reLnBn(int) } should
* be used instead when possible.
*
* @param n the bernoulli number to compute
* @return <i>B<sub>n</sub></i>
*/
public static double bernoulli(int n)
{
if(n < 0)
return Double.NaN;
if(n == 0)
return 1;
if(n == 1)
return -0.5;
if(n % 2 != 0)
return 0;
int sign = 1;
if(n > 2 && n % 4 == 0)
sign = -1;
return sign*exp(reLnBn(n));
}
public static double erf(double x)
{
/*
* erf(x) = 2 * cdf(x sqrt(2)) -1
*
* where cdf is the cdf of the normal distribution
*/
return 2 * Normal.cdf(x * sqrt(2.0), 0, 1)-1;
}
public static double invErf(double x)
{
/*
* inverf(x) = invcdf(x/2+1/2)/sqrt(2)
*
* where invcdf is the inverse cdf of the normal distribution
*/
return Normal.invcdf(x/2+0.5, 0, 1)/sqrt(2.0);
}
public static double erfc(double x)
{
/*
* erf(x) = 2 * cdf(-x sqrt(2))
*
* where cdf is the cdf of the normal distribution
*/
return 2 * Normal.cdf(-x * sqrt(2.0), 0, 1);
}
public static double invErfc(double x)
{
/*
* inverf(x) = invcdf(x/2)/-sqrt(2)
*
* where invcdf is the inverse cdf of the normal distribution
*/
return Normal.invcdf(x/2, 0, 1)/-sqrt(2.0);
}
/**
* Computes the Beta function B(z,w)
     * @param z the first argument of the Beta function
     * @param w the second argument of the Beta function
* @return B(z,w)
*/
public static double beta(double z, double w)
{
return exp(lnBeta(z, w));
}
public static double lnBeta(double z, double w)
{
/*
* The beta function is defined by
*
* Gamma(z) Gamma(w)
* B(z, w) = -----------------
* Gamma(z + w)
*
         * However, the definition is numerically unstable (large value / large value to small result & small input).
         * Taking the log of each side and then exponentiating gives a more stable method of computing the result
*
* lnGamma(z) + lnGamma(w) - lnGamma(z + w)
* B(z, w) = e
*/
return lnGamma(z)+lnGamma(w)-lnGamma(z+w);
}
/**
* Computes the regularized incomplete beta function, I<sub>x</sub>(a, b). The result of which is always in the range [0, 1]
*
* @param x any value in the range [0, 1]
* @param a any value ≥ 0
* @param b any value ≥ 0
* @return the result in a range of [0,1]
*/
public static double betaIncReg(double x, double a, double b)
{
if(a <= 0 || b <= 0)
throw new ArithmeticException("a and b must be > 0, not" + a+ ", and " + b);
if(x == 0 || x == 1)
return x;
else if(x < 0 || x > 1)
throw new ArithmeticException("x must be in the range [0,1], not " + x);
//We use this identity to make sure that our continued fraction is always in a range for which it converges faster
if(x > (a+1)/(a+b+2) || (1-x) < (b+1)/(a+b+2) )
return 1-betaIncReg(1-x, b, a);
/*
* All values are from x = 0 to x = 1, in 0.025 increments
* a = 0.5, b = 0.5: max rel error ~ 2.2e-15
* a = 0.5, b = 5: max rel error ~ 2e-15
* a = 5, b = 0.5: max rel error ~ 1.5e-14 @ x ~= 7.75, otherwise rel error ~ 2e-15
* a = 8, b = 10: max rel error ~ 9e-15, rel error is clearly not uniform but always small
* a = 80, b = 100: max rel error ~ 1.2e-14, rel error is clearly not uniform but always small
*/
double numer = a*log(x)+b*log(1-x)-(log(a)+lnBeta(a, b));
return exp(numer)/regIncBeta.lentz(x,a,b);
}
/**
* Computes the inverse of the incomplete beta function,
* I<sub>p</sub><sup>-1</sup>(a,b), such that {@link #betaIncReg(double, double, double) I<sub>x</sub>(a, b) } = <tt>p</tt>.
* The returned value, x, will always be in the range [0,1].
* The input <tt>p</tt>, must also be in the range [0,1].
*
* @param p any value in the range [0,1]
* @param a any value ≥ 0
* @param b any value ≥ 0
* @return the value x, such that {@link #betaIncReg(double, double, double) I<sub>x</sub>(a, b) } will return p.
*/
public static double invBetaIncReg(double p, double a, double b)
{
if(p < 0 || p > 1)
throw new ArithmeticException("The value p must be in the range [0,1], not" + p);
return RiddersMethod.root(0, 1, (x) -> betaIncReg(x, a, b)- p);
}
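    /*
     * Usage sketch (illustrative, not part of the original source): the two
     * methods invert each other, which doubles as a quick sanity check,
     *
     *   double a = 2.5, b = 4.0;
     *   double p = betaIncReg(0.3, a, b);//a value in [0,1]
     *   invBetaIncReg(p, a, b); //~= 0.3 again
     */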
/**
* Computes the regularized gamma function Q(a,z) = Γ(a,z)/Γ(a). <br>
* This method is more numerically stable and accurate than computing
* it via the direct method, and is always in the range [0,1]. <br><br>
     * Note: this method returns {@link Double#NaN} for a < 0, though real values of Q(a,z) do exist
* @param a any value ≥ 0
* @param z any value > 0
* @return Q(a,z)
*/
public static double gammaQ(double a, double z)
{
if(z<= 0 || a < 0 )
return Double.NaN;
if(z < a+1)
return 1-gammaPSeries(a, z);
/**
* On the range of x from 0.5 to 50
* a=0.15, |rel error| is ~ 3e-15 for most values of x, with a bad spot of |rel error| ~ 3e-11 when x ~= 5.75
* a=0.5, max |rel error| is 3.9e-15
         * a=1, max |rel error| is 4e-15, rel error growing as x -> infinity
* a=5, max |rel error| is 7e-13, but only near x ~= 0, most is in the range 3e-15
* a=10, max |rel error| is 4e-6, but only near x ~= 0, most is in the range 3e-15
*/
return exp(a*log(z)-z-lnGamma(a))/gammaQ.lentz(a, z);
}
public static double gammaPSeries(double a, double z)
{
double ap = a;
double sum;
double del = sum = 1.0/a;
do
{
ap += 1.0;
del *= z/ap;
sum += del;
if(abs(del) < abs(sum)*1e-15)
return sum*exp(-z+a*log(z)-lnGamma(a));
}
while(true);
}
/**
* Returns the regularized gamma function P(a,z) = γ(a,z)/Γ(a). <br>
* This method is more numerically stable and accurate than computing
* it via the direct method, and is always in the range [0,1]. <br><br>
     * Note: this method returns {@link Double#NaN} for a < 0, though real values of P(a,z) do exist
* @param a any value ≥ 0
* @param z any value > 0
* @return P(a,z)
*/
public static double gammaP(double a, double z)
{
if(z<= 0 || a < 0)
return Double.NaN;
if(z < a+1)
return gammaPSeries(a, z);
/*
         * This method is currently unstable for values of z that grow larger, so it is not currently in use
* return exp(a*log(z)-z-lnGamma(a))/gammaP.lentz(a,z);
*/
return 1 - gammaQ(a, z);
}
/**
* Finds the value <tt>x</tt> such that {@link #gammaP(double,double) P(a,x)} = <tt>p</tt>.
     * @param p any value in the range [0, 1]
     * @param a any real value
* @return the inverse
*/
public static double invGammaP(double p, double a)
{
        //Method from Numerical Recipes 3rd edition p 263
if(p < 0 || p > 1)
throw new ArithmeticException("Probability p must be in the range [0,1], "+ p + "is not valid");
//First an estimate must be obtained
double am1 = a-1;
double lnGamA = lnGamma(a);
double x;//the to be returned
double afac = 1;//ONLY used when a>1
double lna1 =1;//ONLY used when a>1
if(a > 1)
{
lna1 = log(am1);
afac = exp(am1*(lna1-1)-lnGamA);
double pp = (p < 0.5) ? p : 1-p;
double t = sqrt(-2*log(pp));
            //Now our initial estimate
x = (2.30753+t*0.27061)/(1.+t*(0.99229+t*0.04481)) - t;
if(p < 0.5)
x = -x;
x = max(1e-3, a * pow(1.0 - (1.0/(9.*a)) - x/(3.*sqrt(a)) , 3) );//if the estimate is too small, increase it
}
else
{
double t = 1.0 - a*(0.253+a*0.12);
if(p < t)
x = pow(p/t, 1.0/a);
else
x = 1-log(1-(p-t)/(1.0-t));
}
//Estimate obtained, now refinement
for(int j = 0; j < 12; j++)
{
            if (x <= 0)//x is very small, return 0 b/c rounding errors and loss of precision will make accuracy impossible
return 0;
double err = gammaP(a, x) - p;
double t;
if(a > 1)
t = afac*exp(-(x-am1)+am1*(log(x)-lna1));
else
t = exp(-x+am1*log(x)-lnGamA);
double u = err/t;
t = u / (1-0.5*min(1 , u*(am1/x - 1)) );//Halley's method
x -= t;
            if(x <= 0)//keep x from going negative (means we are getting a bad value)
x = (x+t)/2;
if(abs(t) < 1e-8*x)
break;//the error is the (1e-8)^2, if we reach this point we have already converged
}
return x;
}
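    /*
     * Usage sketch (illustrative, not part of the original source): P and Q
     * are complementary, and invGammaP undoes gammaP,
     *
     *   gammaP(3, 2.5) + gammaQ(3, 2.5);//~= 1.0
     *   invGammaP(gammaP(3, 2.5), 3); //~= 2.5
     */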
/**
* Computes the incomplete gamma function, Γ(a,z).
* <br>
* Returns {@link Double#NaN} for z ≤ 0
* @param a any value (-∞, ∞)
* @param z any value > 0
* @return Γ(a,z)
*/
public static double gammaIncUp(double a, double z)
{
if(z <= 0)
return Double.NaN;
/**
* On the range of x from 0.5 to 50
* a=0.15, max |rel error| is ~1.3e-11, but only in a small range x ~= 11. Otherwise rel error ~ 2e-15
* a=0.5, max |rel error| is ~3.6e-15, less in all places otherwise
* a=1, max |rel error| is ~4e-15, rel error grows as x-> infinity
* a=5, max |rel error| is 7e-13, but only near x ~= 0, most is in the range 2e-15
* a=10, max |rel error| is 4.9e-6, but only near x ~= 0, most is in the range 5e-15
*/
return exp(a*log(z)-z)/upIncGamma.lentz(a,z);
}
/**
* Computes the lower incomplete gamma function, γ(a,z).
* <br>
* Returns {@link Double#NaN} for z ≤ 0
* @param a any value (-∞, ∞)
* @param z any value > 0
* @return γ(a,z)
*/
public static double gammaIncLow(double a, double z)
{
if(z <= 0)
return Double.NaN;
/**
* On the range of x from 0.5 to 50
* a=0.15, see a=1
* a=0.5, see a=1
* a=1, max |rel error| is ~1.7e-13, rel error starts at 1e-15 and grows to the max as z increases
* a=5, max |rel error| is 3e-13, but only near x ~= 0. for x > epsilon error starts at 5e-15 and grows with z up to 1e-13
* a=10, max |rel error| is 2e-7, but only near x ~= 0, most is in the range 5e-15, grows to 5e-14 as z-> infinity
*/
return exp(lnLowIncGamma(a, z));
}
private static double lnLowIncGamma(double a, double x)
{
/*
* We compute the log of the lower incomplete gamma function by taking the log of
*
* oo
* =====
* -x a \ Gamma(a) n
* y(a, x) = e x > ---------------- x
* / Gamma(a + 1 + n)
* =====
* n = 0
*
* Which becomes
*
*
* / oo \
* |===== |
* |\ Gamma(a) n|
* log(y(a, x)) = -x + log(x) a + log| > ---------------- x |
* |/ Gamma(a + 1 + n) |
* |===== |
* \n = 0 /
*
* To reduce over flow of the gammas and exponentation in the summation, we compute the sum as
*
* oo
* =====
* \ LogGamma(a) - LogGamma(a + 1 + n) + ln(x) n
* > e
* /
* =====
* n = 0
*
         * Testing with x=0.5 to 100 (in increments of 0.5)
         * a=0.15, max relative error is ~ 6e-13, with x < 30 having a relative error smaller than 5e-14
         * a=0.5, maximum relative and absolute error is ~1.5e-12 and ~1.5e-12 respectively, the error starts getting above e-15 when x = 25, and by x=50 the error is up to 2.2e-13
         * a=10, maximum relative and absolute error is ~4e-14 and ~4e-13 respectively, the error starts getting above e-15 when x = 49. For x near zero (up to x ~ 2.5) the error is higher before dropping, ~10^-14
         * a=25, maximum relative error is ~9.99e-15. From x ~ 0 to 14, the error is worse, then dropping to ~e-16.
         * a=50, accuracy starting to degrade badly. From x ~ 0 to 18 the error goes from 1.3 to 1e-7, the error grows at an exponential rate as x -> 0. As x -> Infinity the error gets back down to ~5e-16
*/
//Sumation first
double lnGa = lnGamma(a);
/**
* This value will be updated by the property Gamma(z+1) = Gamma(z) * z, which - when taken the log of, is <br>
* LnGamma(z+1) = LnGamma(z) + ln(z)
*/
double lnGan = lnGa+log(a);
double n = 0;
/**
         * This is the n * ln(x) term. It will be updated by adding the log of x at each step
*/
double lnXN = 0;
double lnX = log(x);
//Set up, now start summing
double term = exp(lnGa - lnGan + lnXN);
double sum = term;
while(term > 1e-15)
{
n++;
lnXN += lnX;
lnGan += log(a+n);
term = exp(lnGa - lnGan + lnXN);
sum += term;
}
//now the rest
return -x + lnX*a +log(sum);
}
@SuppressWarnings("unused")
private static double lnLowIncGamma1(double a, double x)
{
double inter = lowIncGamma.lentz(a,x);
if(inter <= 1e-16)//The result was ~0, in which case Gamma[a,z] ~= Gamma[a]
return lnGamma(a);
return a*log(x)-x-log(inter);
}
private static final ContinuedFraction lowIncGamma = new ContinuedFraction()
{
@Override
public double getA(int pos, double... args)
{
if (pos % 2 == 0)
{
pos /= 2;//the # of the even term
return pos * args[1];
}
else
{
pos = (pos + 0) / 2;
return -(args[0] + pos) * args[1];
}
}
@Override
public double getB(int pos, double... args)
{
return args[0] + pos;
}
};
/**
* See http://functions.wolfram.com/GammaBetaErf/GammaRegularized/10/0003/
*/
private static final ContinuedFraction gammaQ = new ContinuedFraction()
{
@Override
public double getA(int pos, double... args)
{
return pos*(args[0]-pos);
}
@Override
public double getB(int pos, double... args)
{
return (1 +pos*2) - args[0] + args[1];
}
};
/**
* Using the formula from http://functions.wolfram.com/GammaBetaErf/GammaRegularized/10/0009/
*
* Note the formula is given in terms of gammaQ
*
* {@link #gammaQ} is accurate for P and Q for the range of z > 0, this is used for z <= 0
*
*/
private static final ContinuedFraction gammaP = new ContinuedFraction()
{
@Override
public double getA(int pos, double... args)
{
if(pos % 2 == 0)//even step
{
pos/=2;
return args[1]*pos;
}
//Else its an odd step
pos= (pos+1)/2;
return -args[1]*(args[0]+pos);
}
@Override
public double getB(int pos, double... args)
{
return args[0] + pos;
}
};
/**
* continued fraction generated from mathematica
*
* f(a,z) = e^-x*x^a / CF
*
* a_k = (a-k)k
* b_k = 1+k*2-a+x
*
*/
private static final ContinuedFraction upIncGamma = new ContinuedFraction()
{
@Override
public double getA(int pos, double... args)
{
return (args[0]-pos)*pos;
}
@Override
public double getB(int pos, double... args)
{
return (1+pos*2)-args[0]+args[1];
}
};
/**
* See http://dlmf.nist.gov/8.17
*/
private static final ContinuedFraction regIncBeta = new ContinuedFraction() {
@Override
public double getA(int pos, double... args)
{
if(pos % 2 == 0)
{
pos /=2;
return pos*(args[2]-pos)*args[0]/ ( (args[1] + 2*pos-1)*(args[1] + 2*pos) );
}
pos = (pos-1)/2;
double numer = -(args[1] + pos)*(args[1]+args[2]+pos)*args[0];
double denom = (args[1] + 2*pos)*(args[1]+1+2*pos);
return numer/denom;
}
@Override
public double getB(int pos, double... args)
{
return 1.0;
}
};
}
| 40,472 | 33.415816 | 209 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/TrigMath.java |
package jsat.math;
import static java.lang.Math.*;
/**
 * This class includes additional trigonometric and hyperbolic functions that
 * do not come with java.lang.Math by default.
*
* @author Edward Raff
*/
public class TrigMath
{
public static double coth(double x)
{
double eX = exp(x);
double eNX = exp(-x);
return (eX + eNX) / (eX - eNX);
}
public static double sech(double x)
{
return 2 / (exp(x) + exp(-x));
}
public static double csch(double x)
{
return 2 / (exp(x) - exp(-x));
}
public static double asinh(double x)
{
return log(x + sqrt(x*x + 1));
}
public static double acosh(double x)
{
if(x < 1)
return Double.NaN;//Complex result
return log(x + sqrt(x*x - 1));
}
public static double atanh(double x)
{
if(abs(x) >= 1)
return Double.NaN;
        return 0.5 * log((1+x) / (1-x));//note (1+x)/(1-x); the (x+1)/(x-1) form is acoth, not atanh
}
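    /*
     * Sanity check (illustrative, not part of the original source): atanh
     * inverts Math.tanh on (-1, 1),
     *
     *   atanh(Math.tanh(0.75));//~= 0.75
     */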
public static double asech(double x)
{
if(x <= 0 || x > 1)
return Double.NaN;
return log((1 + sqrt(1-x*x))/x);
}
public static double acsch(double x)
{
return log(1/x + sqrt(1+x*x)/abs(x));
}
public static double acotch(double x)
{
if(abs(x) <= 1)
return Double.NaN;
return 0.5* log((x+1) / (x-1));
}
}
| 1,420 | 19.014085 | 64 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/decayrates/DecayRate.java | package jsat.math.decayrates;
import java.io.Serializable;
/**
* Many algorithms use a learning rate to adjust the step size by which the
* search space is covered. In practice, it is often useful to reduce this
* learning rate over time. In this way, large steps can be taken in the
* beginning when we are far from the solution, and smaller steps when we have
* gotten closer to the solution and do not want to step too far away.
*
* @author Edward Raff
*/
public interface DecayRate extends Serializable
{
/**
* Decays the initial value over time.
*
* @param time the current time through the algorithm in the range
* [0, <tt>maxTime</tt>]
* @param maxTime the maximum time step that will be seen
* @param initial the initial value
* @return the decayed value over time of the <tt>initial</tt> value
* @throws ArithmeticException if the time is negative
*/
public double rate(double time, double maxTime, double initial);
/**
* Decays the initial value over time.
*
* @param time the current time step to return a value for
* @param initial the initial learning rate
* @return the decayed value
*/
public double rate(double time, double initial);
public DecayRate clone();
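    /*
     * Usage sketch (illustrative, not part of the original interface): a
     * typical stochastic gradient style loop, with hypothetical names and an
     * InverseDecay instance from this package,
     *
     *   DecayRate decay = new InverseDecay();
     *   double eta0 = 0.1;
     *   for(int t = 0; t < maxTime; t++)
     *   {
     *       double eta = decay.rate(t, maxTime, eta0);
     *       //... take a step of size eta ...
     *   }
     */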
}
| 1,306 | 32.512821 | 79 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/decayrates/ExponetialDecay.java | package jsat.math.decayrates;
import java.util.List;
import jsat.math.FastMath;
import jsat.parameters.Parameter;
import jsat.parameters.Parameterized;
/**
* The Exponential Decay requires the maximum time step to be explicitly known ahead
 * of time, provided either in the call to
 * {@link #rate(double, double, double) }, or internally by
 * {@link #setMaxTime(double) }. <br>
* <br>
* The Exponential Decay will decay at an exponential rate from the initial value
* until the specified {@link #setMinRate(double) } is reached.
*
* @author Edward Raff
*/
public class ExponetialDecay implements DecayRate, Parameterized
{
private static final long serialVersionUID = 4762109043998381095L;
private double min;
private double maxTime;
/**
* Creates a new Exponential Decay
*
     * @param min a value less than the learning rate, that will be the
* minimum rate returned once the maximum time is reached
*/
public ExponetialDecay(double min)
{
this(min, 1000000);
}
/**
* Creates a new Exponential Decay
* <br>
* <br>
* Note that when using {@link #rate(double, double, double) }, the maxTime
     * is always superseded by the value given to the function.
     * @param min a value less than the learning rate, that will be the
* minimum returned value
* @param maxTime the maximum amount of time
*/
public ExponetialDecay(double min, double maxTime)
{
setMinRate(min);
setMaxTime(maxTime);
}
/**
* Creates a new decay rate that decays down to 1e-4
*/
public ExponetialDecay()
{
this(1e-4);
}
/**
* Sets the minimum learning rate to return
* @param min the minimum learning rate to return
*/
public void setMinRate(double min)
{
if(min <= 0 || Double.isNaN(min) || Double.isInfinite(min))
throw new RuntimeException("minRate should be positive, not " + min);
this.min = min;
}
/**
* Returns the minimum value to return from he <i>rate</i> methods
* @return the minimum value to return
*/
public double getMinRate()
{
return min;
}
/**
* Sets the maximum amount of time to allow in the rate decay. Any time
* value larger will be treated as the set maximum.<br>
* <br>
* Any calls to {@link #rate(double, double, double) } will use the value
* provided in that method call instead.
* @param maxTime the maximum amount of time to allow
*/
public void setMaxTime(double maxTime)
{
if(maxTime <= 0 || Double.isInfinite(maxTime) || Double.isNaN(maxTime))
throw new RuntimeException("maxTime should be positive, not " + maxTime);
this.maxTime = maxTime;
}
/**
* Returns the maximum time to use in the rate decay
* @return the maximum time to use in the rate decay
*/
public double getMaxTime()
{
return maxTime;
}
@Override
public double rate(double time, double maxTime, double initial)
{
if(time < 0)
throw new ArithmeticException("Negative time value given");
return (initial-min)* FastMath.pow(maxTime, -Math.min(time, maxTime)/maxTime)+min;
}
@Override
public double rate(double time, double initial)
{
return rate(time, maxTime, initial);
}
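    /*
     * Behavior sketch (illustrative, not part of the original source): the
     * rate slides from the initial value toward the configured minimum as
     * time approaches maxTime,
     *
     *   DecayRate d = new ExponetialDecay(1e-4, 100);
     *   d.rate(0, 100, 0.1); //0.1, the initial rate
     *   d.rate(100, 100, 0.1);//~= 1.1e-3, decayed toward the minimum
     */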
@Override
public DecayRate clone()
{
return new ExponetialDecay(min, maxTime);
}
@Override
public String toString()
{
return "Exponetial Decay";
}
}
| 3,632 | 26.732824 | 90 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/decayrates/InverseDecay.java | package jsat.math.decayrates;
import java.util.List;
import jsat.classifiers.svm.Pegasos;
import jsat.parameters.Parameter;
import jsat.parameters.Parameterized;
/**
*
* Decays an input by the inverse of the amount of time that has occurred, the
* max time being irrelevant. More specifically as
* η / ({@link #setAlpha(double) α}({@link #setTau(double) τ} + time))<br>
* <br>
* This decay rate can be used to create the same rate used by {@link Pegasos},
* by using an initial rate of 1 and setting τ = 1 and α = λ,
* where λ is the regularization term used by the method calling the
* decay rate.
*
* @author Edward Raff
*/
public class InverseDecay implements DecayRate, Parameterized
{
private static final long serialVersionUID = 2756825625752543664L;
private double tau;
private double alpha;
/**
* Creates a new Inverse decay rate
* @param tau the initial time offset
* @param alpha the time scaling
*/
public InverseDecay(double tau, double alpha)
{
setTau(tau);
setAlpha(alpha);
}
/**
* Creates a new Inverse Decay rate
*/
public InverseDecay()
{
this(1, 1);
}
/**
* Controls the scaling of the divisor, increasing α dampens the
* whole range of values. Increasing it increases the values.
* value.
* @param alpha the scaling parameter
*/
public void setAlpha(double alpha)
{
if(alpha <= 0 || Double.isInfinite(alpha) || Double.isNaN(alpha))
throw new IllegalArgumentException("alpha must be a positive constant, not " + alpha);
this.alpha = alpha;
}
/**
* Returns the scaling parameter
* @return the scaling parameter
*/
public double getAlpha()
{
return alpha;
}
/**
* Controls the rate early in time, but has a decreasing impact on the rate
* returned as time goes forward. Larger values of τ dampen the initial
* rates returned, while lower values let the initial rates start higher.
*
* @param tau the early rate dampening parameter
*/
public void setTau(double tau)
{
if(tau <= 0 || Double.isInfinite(tau) || Double.isNaN(tau))
throw new IllegalArgumentException("tau must be a positive constant, not " + tau);
this.tau = tau;
}
/**
* Returns the early rate dampening parameter
* @return the early rate dampening parameter
*/
public double getTau()
{
return tau;
}
@Override
public double rate(double time, double maxTime, double initial)
{
return rate(time, initial);
}
@Override
public double rate(double time, double initial)
{
return initial/(alpha*(tau+time));
}
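    /*
     * Usage sketch (illustrative, not part of the original source): as noted
     * in the class documentation, the Pegasos rate 1/(lambda (t + 1)) is
     * recovered with an initial rate of 1, tau = 1, and alpha = lambda; t is
     * a hypothetical time step,
     *
     *   double lambda = 1e-4;
     *   DecayRate pegasosRate = new InverseDecay(1, lambda);
     *   pegasosRate.rate(t, 1.0);//== 1 / (lambda * (1 + t))
     */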
@Override
public DecayRate clone()
{
return new InverseDecay(tau, alpha);
}
@Override
public String toString()
{
return "Inverse Decay";
}
}
| 3,047 | 24.613445 | 98 | java |