repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
JSAT | JSAT-master/JSAT/src/jsat/math/decayrates/LinearDecay.java | package jsat.math.decayrates;
import java.util.List;
import jsat.parameters.Parameter;
import jsat.parameters.Parameterized;
/**
* The Linear Decay requires the maximum time step to be explicitly known ahead
* of time. It is provided either in the call to
* {@link #rate(double, double, double) }, or internally by
* {@link #setMaxTime(double) }. <br>
* <br>
* The Linear Decay will decay at a constant rate / slope from the initial value
* until the specified {@link #setMinRate(double) } is reached.
*
* @author Edward Raff
*/
public class LinearDecay implements DecayRate, Parameterized
{
private static final long serialVersionUID = 4934146018742844875L;
private double min;
private double maxTime;
/**
* Creates a new linear decay that goes down to 1e-4 after 100,000 units of
* time
*/
public LinearDecay()
{
this(1e-4, 100000);
}
/**
* Creates a new Linear Decay
* <br>
* <br>
* Note that when using {@link #rate(double, double, double) }, the maxTime
* is always superseded by the value given to the function.
* @param min a value less than the learning rate that will be the
* minimum returned value
* @param maxTime the maximum amount of time
*/
public LinearDecay(double min, double maxTime)
{
setMinRate(min);
setMaxTime(maxTime);
}
/**
* Sets the minimum learning rate to return
* @param min the minimum learning rate to return
*/
public void setMinRate(double min)
{
if(min <= 0 || Double.isNaN(min) || Double.isInfinite(min))
throw new RuntimeException("minRate should be positive, not " + min);
this.min = min;
}
/**
* Returns the minimum value to return from the <i>rate</i> methods
* @return the minimum value to return
*/
public double getMinRate()
{
return min;
}
/**
* Sets the maximum amount of time to allow in the rate decay. Any time
* value larger will be treated as the set maximum.<br>
* <br>
* Any calls to {@link #rate(double, double, double) } will use the value
* provided in that method call instead.
* @param maxTime the maximum amount of time to allow
*/
public void setMaxTime(double maxTime)
{
if(maxTime <= 0 || Double.isInfinite(maxTime) || Double.isNaN(maxTime))
throw new RuntimeException("maxTime should be positive, not " + maxTime);
this.maxTime = maxTime;
}
/**
* Returns the maximum time to use in the rate decay
* @return the maximum time to use in the rate decay
*/
public double getMaxTime()
{
return maxTime;
}
@Override
public double rate(double time, double maxTime, double initial)
{
if(time < 0)
throw new ArithmeticException("Negative time value given");
return (initial-min)*(1.0-Math.min(time, maxTime)/maxTime)+min;
}
@Override
public double rate(double time, double initial)
{
return rate(time, maxTime, initial);
}
@Override
public DecayRate clone()
{
return new LinearDecay(min, maxTime);
}
@Override
public String toString()
{
return "Linear Decay";
}
}
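/*
 * Illustrative usage sketch (not part of the original file): shows how the
 * learning rate falls linearly from the initial value down to the configured
 * minimum as time advances. The constructor and rate() signatures are taken
 * from the class above; the initial rate of 0.1 is an arbitrary example value.
 */
class LinearDecayExample
{
    public static void main(String[] args)
    {
        LinearDecay decay = new LinearDecay(1e-4, 100000);
        double initialRate = 0.1;//example starting learning rate
        for (int t = 0; t <= 100000; t += 25000)//sample a few time steps
            System.out.println("t=" + t + "  rate=" + decay.rate(t, initialRate));
    }
}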
| 3,308 | 26.575 | 85 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/decayrates/NoDecay.java | package jsat.math.decayrates;
/**
* A possible value for a decaying learning rate. NoDecay performs no
* decay of the initial value; the initial value will always be returned
* regardless of the input.
*
* @author Edward Raff
*/
public class NoDecay implements DecayRate
{
private static final long serialVersionUID = -4502356199281880268L;
@Override
public double rate(double time, double maxTime, double initial)
{
return rate(time, initial);
}
@Override
public double rate(double time, double initial)
{
if(time < 0)
throw new ArithmeticException("Negative time value given");
return initial;
}
@Override
public DecayRate clone()
{
return new NoDecay();
}
@Override
public String toString()
{
return "NoDecay";
}
}
| 858 | 19.95122 | 76 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/decayrates/PowerDecay.java | package jsat.math.decayrates;
import java.util.List;
import jsat.math.FastMath;
import jsat.parameters.Parameter;
import jsat.parameters.Parameterized;
/**
*
* Decays an input by a power of the amount of time that has elapsed, with the
* max time being irrelevant. More specifically, the rate decays as
* η ({@link #setTau(double) τ} + time)<sup>-{@link #setAlpha(double) α}</sup><br>
* <br>
* {@link InverseDecay} is a special case of this decay when α=1. <br>
* {@link NoDecay} is a special case of this decay when α = 0 <br>
*
* @author Edward Raff
*/
public class PowerDecay implements DecayRate, Parameterized
{
private static final long serialVersionUID = 6075066391550611699L;
private double tau;
private double alpha;
/**
* Creates a new Power decay rate
* @param tau the initial time offset
* @param alpha the time scaling
*/
public PowerDecay(double tau, double alpha)
{
setTau(tau);
setAlpha(alpha);
}
/**
* Creates a new Power Decay rate
*/
public PowerDecay()
{
this(10, 0.5);
}
/**
* Controls the scaling via exponentiation; increasing α increases the
* rate at which the rate decays. As α goes to zero, the decay rate
* goes toward zero (meaning the value returned becomes constant).
* @param alpha the scaling parameter in [0, ∞), but should generally
* be kept in (0, 1).
*/
public void setAlpha(double alpha)
{
if(alpha < 0 || Double.isInfinite(alpha) || Double.isNaN(alpha))
throw new IllegalArgumentException("alpha must be a non negative constant, not " + alpha);
this.alpha = alpha;
}
/**
* Returns the scaling parameter
* @return the scaling parameter
*/
public double getAlpha()
{
return alpha;
}
/**
* Controls the rate early in time, but has a decreasing impact on the rate
* returned as time goes forward. Larger values of τ dampen the initial
* rates returned, while lower values let the initial rates start higher.
*
* @param tau the early rate dampening parameter
*/
public void setTau(double tau)
{
if(tau <= 0 || Double.isInfinite(tau) || Double.isNaN(tau))
throw new IllegalArgumentException("tau must be a positive constant, not " + tau);
this.tau = tau;
}
/**
* Returns the early rate dampening parameter
* @return the early rate dampening parameter
*/
public double getTau()
{
return tau;
}
@Override
public double rate(double time, double maxTime, double initial)
{
return rate(time, initial);
}
@Override
public double rate(double time, double initial)
{
return initial * FastMath.pow(tau + time, -alpha);
}
@Override
public DecayRate clone()
{
return new PowerDecay(tau, alpha);
}
@Override
public String toString()
{
return "Power Decay";
}
}
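/*
 * Illustrative usage sketch (not part of the original file): compares two
 * alpha settings to show that a larger alpha makes the returned rate shrink
 * faster with time. The tau/alpha constructor and rate() method are those
 * defined above; the initial rate of 0.1 is an arbitrary example value.
 */
class PowerDecayExample
{
    public static void main(String[] args)
    {
        PowerDecay gentle = new PowerDecay(10, 0.25);
        PowerDecay aggressive = new PowerDecay(10, 0.75);
        double initialRate = 0.1;
        for (int t = 0; t <= 1000; t += 250)
            System.out.println("t=" + t
                    + "  alpha=0.25: " + gentle.rate(t, initialRate)
                    + "  alpha=0.75: " + aggressive.rate(t, initialRate));
    }
}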
| 3,074 | 25.73913 | 102 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/integration/AdaptiveSimpson.java |
package jsat.math.integration;
import jsat.math.Function1D;
/**
* This class provides an implementation of the Adaptive Simpson method for
* numerically computing an integral
*
* @author Edward Raff
*/
public class AdaptiveSimpson
{
/**
* Numerically computes the integral of the given function
*
* @param f the function to integrate
* @param tol the precision for the desired result
* @param a the lower limit of the integral
* @param b the upper limit of the integral
* @return an approximation of the integral of
* ∫<sub>a</sub><sup>b</sup>f(x) dx
*/
static public double integrate(Function1D f, double tol, double a, double b)
{
return integrate(f, tol, a, b, 100);
}
/**
* Numerically computes the integral of the given function
*
* @param f the function to integrate
* @param tol the precision for the desired result
* @param a the lower limit of the integral
* @param b the upper limit of the integral
* @param maxDepth the maximum recursion depth
* @return an approximation of the integral of
* ∫<sub>a</sub><sup>b</sup>f(x) dx
*/
static public double integrate(Function1D f, double tol, double a, double b, int maxDepth)
{
if(a == b)
return 0;
else if(a > b)
throw new RuntimeException("Integral upper limit (" + b+") must be larger than the lower-limit (" + a + ")");
double h = b-a;
double c = (a+b)/2;
double f_a = f.f(a);
double f_b = f.f(b);
double f_c = f.f(c);
double one_simpson = h * (f_a + 4 * f_c + f_b) / 6;
double d = (a + c) / 2;
double e = (c + b) / 2;
double two_simpson = h * (f_a + 4 * f.f(d) + 2 * f_c + 4 * f.f(e) + f_b) / 12;
if(maxDepth <= 0)
return two_simpson;
if(Math.abs(one_simpson-two_simpson) < 15*tol)
return two_simpson + (two_simpson - one_simpson)/15;
else
{
double left_simpson = integrate(f, tol/2, a, c, maxDepth-1);
double right_simpson = integrate(f, tol/2, c, b, maxDepth-1);
return left_simpson + right_simpson;
}
}
}
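/*
 * Illustrative usage sketch (not part of the original file): integrates x^2
 * over [0, 3], whose exact value is 9. This assumes Function1D is a
 * single-method interface (it is only called through f.f(x) above), so a
 * lambda can be used in its place.
 */
class AdaptiveSimpsonExample
{
    public static void main(String[] args)
    {
        Function1D f = (x) -> x * x;//integrand
        double approx = AdaptiveSimpson.integrate(f, 1e-10, 0, 3);
        System.out.println("integral of x^2 over [0,3] ~= " + approx);
    }
}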
| 2,270 | 30.109589 | 121 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/integration/Romberg.java |
package jsat.math.integration;
import static java.lang.Math.*;
import jsat.math.Function1D;
/**
* This class provides an implementation of the Romberg method for
* numerically computing an integral
*
* @author Edward Raff
*/
public class Romberg
{
/**
* Numerically computes the integral of the given function
*
* @param f the function to integrate
* @param a the lower limit of the integral
* @param b the upper limit of the integral
* @return an approximation of the integral of
* ∫<sub>a</sub><sup>b</sup>f(x) dx
*/
public static double romb(Function1D f, double a, double b)
{
return romb(f, a, b, 20);
}
/**
* Numerically computes the integral of the given function
*
* @param f the function to integrate
* @param a the lower limit of the integral
* @param b the upper limit of the integral
* @param max the maximum number of extrapolation steps to perform.
* @return an approximation of the integral of
* ∫<sub>a</sub><sup>b</sup>f(x) dx
*/
public static double romb(Function1D f, double a, double b, int max)
{
// see http://en.wikipedia.org/wiki/Romberg's_method
max+=1;
double[] s = new double[max];//first index will not be used
double var = 0;//var is used to hold the value R(n-1,m-1), from the previous row so that 2 arrays are not needed
double lastVal = Double.NEGATIVE_INFINITY;
for(int k = 1; k < max; k++)
{
for(int i = 1; i <= k; i++)
{
if(i == 1)
{
var = s[i];
s[i] = Trapezoidal.trapz(f, a, b, (int)pow(2, k-1));
}
else
{
s[k]= ( pow(4 , i-1)*s[i-1]-var )/(pow(4, i-1) - 1);
var = s[i];
s[i]= s[k];
}
}
if( abs(lastVal - s[k]) < 1e-15 )//there are only approximately 15.955 accurate decimal digits in a double, this is as close as we will get
return s[k];
else lastVal = s[k];
}
return s[max-1];
}
}
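/*
 * Illustrative usage sketch (not part of the original file): approximates the
 * integral of sin(x) over [0, pi], whose exact value is 2. Assumes Function1D
 * is a single-method interface so a lambda may stand in for it.
 */
class RombergExample
{
    public static void main(String[] args)
    {
        Function1D f = (x) -> Math.sin(x);
        System.out.println("integral of sin(x) over [0,pi] ~= " + Romberg.romb(f, 0, Math.PI));
    }
}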
| 2,172 | 28.767123 | 149 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/integration/Trapezoidal.java |
package jsat.math.integration;
import jsat.math.Function1D;
/**
* This class provides an implementation of the Trapezoidal method for
* numerically computing an integral
*
* @author Edward Raff
*/
public class Trapezoidal
{
/**
* Numerically computes the integral of the given function
*
* @param f the function to integrate
* @param a the lower limit of the integral
* @param b the upper limit of the integral
* @param N the number of points in the integral to take, must be ≥ 2.
* @return an approximation of the integral of
* ∫<sub>a</sub><sup>b</sup>f(x) dx
*/
static public double trapz(Function1D f, double a, double b, int N)
{
if(a == b)
return 0;
else if(a > b)
throw new RuntimeException("Integral upper limit (" + b+") must be larger than the lower-limit (" + a + ")");
else if(N < 2)
throw new RuntimeException("At least two integration parts must be used, not " + N);
/*
* b / N - 1 \
* / | ===== |
* | b - a |f(a) + f(b) \ / k (b - a)\|
* | f(x) dx = ----- |----------- + > f|a + ---------||
* | N | 2 / \ N /|
* / | ===== |
* a \ k = 1 /
*/
double sum =0;
for(int k = 1; k < N; k++)
sum += f.f(a+k*(b-a)/N);
sum+= (f.f(a)+f.f(b))/2;
return (b-a)/N*sum;
}
}
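/*
 * Illustrative usage sketch (not part of the original file): shows the
 * trapezoidal estimate of the integral of x^2 over [0, 1] (exact value 1/3)
 * improving as the number of points N grows. Assumes Function1D is a
 * single-method interface so a lambda may be used.
 */
class TrapezoidalExample
{
    public static void main(String[] args)
    {
        Function1D f = (x) -> x * x;
        for (int N : new int[]{2, 8, 32, 128})
            System.out.println("N=" + N + "  estimate=" + Trapezoidal.trapz(f, 0, 1, N));
    }
}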
| 1,687 | 32.76 | 121 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/BFGS.java | package jsat.math.optimization;
import jsat.linear.*;
import jsat.math.*;
/**
* Implementation of the Broyden–Fletcher–Goldfarb–Shanno (BFGS) algorithm for
* function minimization. For {@code n} dimensional problems it requires
* <i>O(n<sup>2</sup>)</i> work per iteration and uses first order information
* to approximate the Hessian.
*
* @author Edward Raff
*/
public class BFGS implements Optimizer
{
private LineSearch lineSearch;
private int maxIterations;
private boolean inftNormCriterion = true;
/**
* Creates a new BFGS optimization object that uses a maximum of 250
* iterations and a {@link BacktrackingArmijoLineSearch backtracking} line
* search.
*/
public BFGS()
{
this(250, new BacktrackingArmijoLineSearch());
}
/**
* Creates a new BFGS optimization object
* @param maxIterations the maximum number of iterations to allow before
* stopping
* @param lineSearch the line search method to use on updates
*/
public BFGS(int maxIterations, LineSearch lineSearch)
{
setMaximumIterations(maxIterations);
setLineSearch(lineSearch);
}
@Override
public void optimize(double tolerance, Vec w, Vec x0, Function f, FunctionVec fp, boolean parallel)
{
if(fp == null)
fp = Function.forwardDifference(f);
LineSearch search = lineSearch.clone();
Matrix H = Matrix.eye(x0.length());
Vec x_prev = x0.clone();
Vec x_cur = x0.clone();
final double[] f_xVal = new double[1];//store place for f_x
//gradient
Vec x_grad = x0.clone();
x_grad.zeroOut();
Vec x_gradPrev = x_grad.clone();
//p_l
Vec p_k = x_grad.clone();
Vec s_k = x_grad.clone();
Vec y_k = x_grad.clone();
f_xVal[0] = f.f(x_cur);
x_grad = fp.f(x_cur, x_grad);
int iter = 0;
while(gradConvgHelper(x_grad) > tolerance && iter < maxIterations)
{
iter++;
p_k.zeroOut();
H.multiply(x_grad, -1, p_k);//p_k = −H_k ∇f_k; (6.18)
//Set x_k+1 = x_k + α_k p_k where α_k is computed from a line search
x_cur.copyTo(x_prev);
x_grad.copyTo(x_gradPrev);
double alpha_k = search.lineSearch(1.0, x_prev, x_gradPrev, p_k, f, fp, f_xVal[0], x_gradPrev.dot(p_k), x_cur, f_xVal, x_grad, parallel);
if(alpha_k < 1e-12 && iter > 5)//if we are making near epsilon steps consider it done
break;
if(!search.updatesGrad())
fp.f(x_cur, x_grad, parallel);
//Define s_k =x_k+1 −x_k and y_k = ∇f_k+1 −∇f_k;
x_cur.copyTo(s_k);
s_k.mutableSubtract(x_prev);
x_grad.copyTo(y_k);
y_k.mutableSubtract(x_gradPrev);
//Compute H_k+1 by means of (6.17);
double skyk = s_k.dot(y_k);
if(skyk <= 0)
{
H.zeroOut();
for(int i = 0; i < H.rows(); i++)
H.set(i, i, 1);
continue;
}
if(iter == 1 && skyk > 1e-12)//iter is incremented at the top of the loop, so the first pass has iter == 1
for(int i = 0; i < H.rows(); i++)
H.set(i, i, skyk/y_k.dot(y_k));
/*
* From "A Perfect Example for The BFGS Method" equation 1.5
* aamath: H_(k+1)=H_k-(s_k*y_k^T*H_k+H_k*y_k*s_k^T)/(s_k^T*y_k)+(1+(y_k^T*H_k*y_k)/(s_k^T*y_k))*((s_k*s_k^T)/(s_k^T*y_k))
*
* T T / T \ T
* s y H + H y s | y H y | s s
* k k k k k k | k k k| k k
* H = H - ------------------- + |1 + --------| -----
* k + 1 k T | T | T
* s y | s y | s y
* k k \ k k / k k
*
* TODO: y_k^T H_k y_k should be just a scalar constant
* TODO: exploit the symmetry of H_k
*/
Vec Hkyk = H.multiply(y_k);
Vec ykHk = y_k.multiply(H);
double b = (1+y_k.dot(Hkyk)/skyk)/skyk;//coef for right rank update
//update
Matrix.OuterProductUpdate(H, s_k, ykHk, -1/skyk);
Matrix.OuterProductUpdate(H, Hkyk, s_k, -1/skyk);
Matrix.OuterProductUpdate(H, s_k, s_k, b);
}
x_cur.copyTo(w);
}
/**
* By default the infinity norm is used to judge convergence. If set to
* {@code false}, the 2 norm will be used instead.
* @param inftNormCriterion {@code true} to use the infinity norm, {@code false} to use the 2 norm
*/
public void setInftNormCriterion(boolean inftNormCriterion)
{
this.inftNormCriterion = inftNormCriterion;
}
/**
* Returns whether or not the infinity norm ({@code true}) or 2 norm
* ({@code false}) is used to determine convergence.
* @return {@code true} if the infinity norm is in use, {@code false} for
* the 2 norm
*/
public boolean isInftNormCriterion()
{
return inftNormCriterion;
}
private double gradConvgHelper(Vec grad)
{
if(!inftNormCriterion)
return grad.pNorm(2);
double max = 0;
for(IndexValue iv : grad)
max = Math.max(max, Math.abs(iv.getValue()));
return max;
}
@Override
public void setMaximumIterations(int iterations)
{
if(iterations < 1)
throw new IllegalArgumentException("Iterations must be a positive value, not " + iterations);
this.maxIterations = iterations;
}
@Override
public int getMaximumIterations()
{
return maxIterations;
}
/**
* Sets the line search method used at each iteration
* @param lineSearch the line search method used at each iteration
*/
public void setLineSearch(LineSearch lineSearch)
{
this.lineSearch = lineSearch;
}
/**
* Returns the line search method used at each iteration
* @return the line search method used at each iteration
*/
public LineSearch getLineSearch()
{
return lineSearch;
}
@Override
public Optimizer clone()
{
return new BFGS(maxIterations, lineSearch.clone());
}
}
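/*
 * Illustrative usage sketch (not part of the original file): minimizes the
 * two-dimensional Rosenbrock function (defined elsewhere in this package),
 * whose minimum is the all-ones vector. DenseVector comes from the
 * jsat.linear.* import above; the starting point (-1.2, 1) is a customary
 * test start and otherwise arbitrary.
 */
class BFGSExample
{
    public static void main(String[] args)
    {
        RosenbrockFunction f = new RosenbrockFunction();
        Vec x0 = new DenseVector(new double[]{-1.2, 1.0});//initial guess
        Vec w = new DenseVector(2);//storage for the solution
        BFGS bfgs = new BFGS();
        bfgs.optimize(1e-6, w, x0, f, f.getDerivative(), false);
        System.out.println("solution (should be near [1, 1]): " + w);
    }
}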
| 6,555 | 31.295567 | 149 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/BacktrackingArmijoLineSearch.java | package jsat.math.optimization;
import jsat.linear.Vec;
import jsat.math.Function;
import jsat.math.FunctionVec;
/**
* An implementation of Backtracking line search using the Armijo rule.
* The search for alpha is done by quadratic and cubic interpolation without
* using any derivative evaluations.
* @author Edward Raff
*/
public class BacktrackingArmijoLineSearch implements LineSearch
{
private double rho;
private double c1;
/**
* Creates a new Backtracking line search
*/
public BacktrackingArmijoLineSearch()
{
this(0.5, 1e-1);
}
/**
* Creates a new Backtracking line search object
* @param rho constant to decrease alpha by in (0, 1) when interpolation is
* not possible
* @param c1 the <i>sufficient decrease condition</i> condition constant in
* (0, 1/2)
*/
public BacktrackingArmijoLineSearch(double rho, double c1)
{
if(!(rho > 0 && rho < 1))
throw new IllegalArgumentException("rho must be in (0,1), not " + rho);
this.rho = rho;
setC1(c1);
}
/**
* Sets the constant used for the <i>sufficient decrease condition</i>
* f(x+α p) ≤ f(x) + c<sub>1</sub> α p<sup>T</sup>∇f(x)
* @param c1 the <i>sufficient decrease condition</i>
*/
public void setC1(double c1)
{
if(c1 <= 0 || c1 >= 0.5)
throw new IllegalArgumentException("c1 must be in (0, 1/2) not " + c1);
this.c1 = c1;
}
/**
* Returns the <i>sufficient decrease condition</i> constant
* @return the <i>sufficient decrease condition</i> constant
*/
public double getC1()
{
return c1;
}
@Override
public double lineSearch(double alpha_max, Vec x_k, Vec x_grad, Vec p_k, Function f, FunctionVec fp, double f_x, double gradP, Vec x_alpha_pk, double[] fxApRet, Vec grad_x_alpha_pk, boolean parallel)
{
if(Double.isNaN(f_x))
f_x = f.f(x_k, parallel);
if(Double.isNaN(gradP))
gradP = x_grad.dot(p_k);
double alpha = alpha_max;
if(x_alpha_pk == null)
x_alpha_pk = x_k.clone();
else
x_k.copyTo(x_alpha_pk);
x_alpha_pk.mutableAdd(alpha, p_k);
double f_xap = f.f(x_alpha_pk, parallel);
if(fxApRet != null)
fxApRet[0] = f_xap;
double oldAlpha = 0;
double oldF_xap = f_x;
while (f_xap > f_x + c1 * alpha * gradP)//we return start if its already good
{
final double tooSmall = 0.1*alpha;
final double tooLarge = 0.9*alpha;
//see INTERPOLATION section of chapter 3.5
//XXX double compare.
if(alpha == alpha_max)//quadratic interpolation
{
double alphaCandidate = -gradP*oldAlpha*oldAlpha/(2*(f_xap-f_x-gradP*oldAlpha));
oldAlpha = alpha;
if(alphaCandidate < tooSmall || alphaCandidate > tooLarge || Double.isNaN(alphaCandidate))
{
alpha = rho*oldAlpha;
}
else
{
alpha = alphaCandidate;
}
}
else//cubic interpolation
{
//g = φ(α1)−φ(0)−φ'(0)α1
double g = f_xap-f_x-gradP*alpha;
//h = φ(α0) − φ(0) − φ'(0)α0
double h = oldF_xap-f_x-gradP*oldAlpha;
double a0Sqrd = oldAlpha*oldAlpha;
double a1Sqrd = alpha*alpha;
double a = a0Sqrd*g-a1Sqrd*h;
a /= (a0Sqrd*a1Sqrd*(alpha-oldAlpha));
double b = -a0Sqrd*oldAlpha*g+a1Sqrd*alpha*h;
b /= (a0Sqrd*a1Sqrd*(alpha-oldAlpha));
double alphaCandidate = (-b + Math.sqrt(b*b-3*a*gradP))/(3*a);
oldAlpha = alpha;
if(alphaCandidate < tooSmall || alphaCandidate > tooLarge || Double.isNaN(alphaCandidate))
{
alpha = rho*oldAlpha;
}
else
{
alpha = alphaCandidate;
}
}
if(alpha < 1e-20)
return oldAlpha;
x_alpha_pk.mutableSubtract(oldAlpha - alpha, p_k);
oldF_xap = f_xap;
f_xap = f.f(x_alpha_pk, parallel);
if(fxApRet != null)
fxApRet[0] = f_xap;
}
return alpha;
}
@Override
public boolean updatesGrad()
{
return false;
}
@Override
public BacktrackingArmijoLineSearch clone()
{
return new BacktrackingArmijoLineSearch(rho, c1);
}
}
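/*
 * Illustrative usage sketch (not part of the original file): a line search is
 * normally not used on its own but supplied to an optimizer. Here a
 * custom-tuned backtracking search (rho = 0.5, c1 = 1e-4) is handed to the
 * BFGS optimizer from this package; DenseVector is referenced by its fully
 * qualified name since it is not imported above.
 */
class BacktrackingArmijoLineSearchExample
{
    public static void main(String[] args)
    {
        LineSearch search = new BacktrackingArmijoLineSearch(0.5, 1e-4);
        Optimizer optimizer = new BFGS(250, search);
        RosenbrockFunction f = new RosenbrockFunction();
        Vec x0 = new jsat.linear.DenseVector(new double[]{-1.2, 1.0});
        Vec w = new jsat.linear.DenseVector(2);
        optimizer.optimize(1e-6, w, x0, f, f.getDerivative(), false);
        System.out.println("minimum found at " + w);
    }
}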
| 4,824 | 30.535948 | 203 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/GoldenSearch.java | package jsat.math.optimization;
import jsat.math.Function1D;
/**
* Minimizes a single variate function over a bounded interval using the
* golden-section search method.
*
* @author Edward Raff
*/
public class GoldenSearch
{
/**
* Phi (golden ratio) minus 1
*/
private static final double tau = (Math.sqrt(5.0) - 1.0)/2.0;
private static final double om_tau = 1-tau;
/**
* Finds the local minimum of the function {@code f}.
* @param eps the desired accuracy of the result
* @param maxIterations the maximum number of iterations to perform
* @param a the left bound on the minimum
* @param b the right bound on the minimum
* @param f the function to find the minimum of
* @return the input value in [a, b] that produces the local minimum
*/
public static double minimize(double eps, int maxIterations, double a, double b, Function1D f)
{
if (a > b)
{
double tmp = b;
b = a;
a = tmp;
}
//Initial values
int iter = 0;
double x1 = a + om_tau*(b-a);
double f1 = f.f(x1);
double x2 = a + tau*(b-a);
double f2 = f.f(x2);
while (b - a > 2 * eps && iter < maxIterations)
{
if(f1 > f2)
{
a = x1;
x1 = x2;
f1 = f2;
x2 = a + tau*(b-a);
f2 = f.f(x2);
}
else//f1 < f2
{
b = x2;
x2 = x1;
f2 = f1;
x1 = a + om_tau*(b-a);
f1 = f.f(x1);
}
iter++;
}
return (a + b) / 2.0;
}
}
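/*
 * Illustrative usage sketch (not part of the original file): brackets the
 * minimum of (x - 2)^2 inside [0, 5]; the answer should come back close to
 * x = 2. Assumes Function1D is a single-method interface so a lambda may be
 * used; the tolerance and iteration limit are arbitrary example values.
 */
class GoldenSearchExample
{
    public static void main(String[] args)
    {
        Function1D f = (x) -> (x - 2) * (x - 2);
        double xMin = GoldenSearch.minimize(1e-8, 1000, 0, 5, f);
        System.out.println("minimum near x = " + xMin);
    }
}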
| 1,733 | 23.771429 | 98 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/LBFGS.java | package jsat.math.optimization;
import java.util.ArrayList;
import java.util.List;
import jsat.linear.*;
import jsat.math.*;
import jsat.utils.DoubleList;
/**
* Implementation of the Limited memory variant of {@link BFGS}. It uses a
* history of {@link #setM(int) m} items to solve {@code n} dimension problems
* with {@code O(m n)} work per iteration.
*
* @author Edward Raff
*/
public class LBFGS implements Optimizer
{
private int m;
private int maxIterations;
private LineSearch lineSearch;
private boolean inftNormCriterion = true;
/**
* Creates a new L-BFGS optimization object that uses a maximum of 500
* iterations and a {@link BacktrackingArmijoLineSearch Backtracking} line
* search. A {@link #setM(int) history} of 10 items will be used
*/
public LBFGS()
{
this(10);
}
/**
* Creates a new L-BFGS optimization object that uses a maximum of 500
* iterations and a {@link BacktrackingArmijoLineSearch Backtracking} line
* search.
* @param m the number of history items
*/
public LBFGS(int m)
{
this(m, 500, new BacktrackingArmijoLineSearch());
}
/**
* Creates a new L-BFGS optimization object
* @param m the number of history items
* @param maxIterations the maximum number of iterations before stopping
* @param lineSearch the line search method to use for optimization
*/
public LBFGS(int m, int maxIterations, LineSearch lineSearch)
{
setM(m);
setMaximumIterations(maxIterations);
setLineSearch(lineSearch);
}
/**
* See Algorithm 7.4 (L-BFGS two-loop recursion).
* @param x_grad the initial value ∇ f<sub>k</sub>
* @param rho
* @param s
* @param y
* @param q the location to store the value of H<sub>k</sub> ∇ f<sub>k</sub>
* @param alphas temp space to do work, should be as large as the number of history vectors
*/
public static void twoLoopHp(Vec x_grad, List<Double> rho, List<Vec> s, List<Vec> y, Vec q, double[] alphas)
{
//q ← ∇ fk;
x_grad.copyTo(q);
if(s.isEmpty())
return;//identity, we are done
//for i = k−1,k−2,...,k−m
for(int i = 0; i < s.size(); i++)
{
Vec s_i = s.get(i);
Vec y_i = y.get(i);
double alpha_i = alphas[i] = rho.get(i)*s_i.dot(q);
q.mutableSubtract(alpha_i, y_i);
}
//r ← Hk0q; and see eq (7.20), done in place in q
q.mutableMultiply(s.get(0).dot(y.get(0))/y.get(0).dot(y.get(0)));
//for i = k−m,k−m+1,...,k−1
for(int i = s.size()-1; i >= 0; i--)
{
//β ← ρ_i y_i^T r ;
double beta = rho.get(i)*y.get(i).dot(q);
//r ← r + si (αi − β)
q.mutableAdd(alphas[i]-beta, s.get(i));
}
}
@Override
public void optimize(double tolerance, Vec w, Vec x0, Function f, FunctionVec fp, boolean parallel)
{
if(fp == null)
fp = Function.forwardDifference(f);
LineSearch search = lineSearch.clone();
final double[] f_xVal = new double[1];//store place for f_x
//history for implicit H
List<Double> Rho = new DoubleList(m);
List<Vec> S = new ArrayList<>(m);
List<Vec> Y = new ArrayList<>(m);
Vec x_prev = x0.clone();
Vec x_cur = x0.clone();
f_xVal[0] = f.f(x_prev, parallel);
//gradient
Vec x_grad = x0.clone();
x_grad.zeroOut();
Vec x_gradPrev = x_grad.clone();
//p_l
Vec p_k = x_grad.clone();
Vec s_k = x_grad.clone();
Vec y_k = x_grad.clone();
x_grad = fp.f(x_cur, x_grad, parallel);
double[] alphas = new double[m];
int iter = 0;
while(gradConvgHelper(x_grad) > tolerance && iter < maxIterations)
{
//p_k = −H_k ∇f_k; (6.18)
twoLoopHp(x_grad, Rho, S, Y, p_k, alphas);
p_k.mutableMultiply(-1);
//Set x_k+1 = x_k + α_k p_k where α_k is computed from a line search
x_cur.copyTo(x_prev);
x_grad.copyTo(x_gradPrev);
double alpha_k = search.lineSearch(1.0, x_prev, x_gradPrev, p_k, f, fp, f_xVal[0], x_gradPrev.dot(p_k), x_cur, f_xVal, x_grad, parallel);
if(alpha_k < 1e-12)//if we are making near epsilon steps consider it done
break;
if(!search.updatesGrad())
fp.f(x_cur, x_grad, parallel);
//Define s_k =x_k+1 −x_k and y_k = ∇f_k+1 −∇f_k;
x_cur.copyTo(s_k);
s_k.mutableSubtract(x_prev);
S.add(0, s_k.clone());
x_grad.copyTo(y_k);
y_k.mutableSubtract(x_gradPrev);
Y.add(0, y_k.clone());
Rho.add(0, 1/s_k.dot(y_k));
if(Double.isInfinite(Rho.get(0)) || Double.isNaN(Rho.get(0)))
{
Rho.clear();
S.clear();
Y.clear();
}
while(Rho.size() > m)
{
Rho.remove(m);
S.remove(m);
Y.remove(m);
}
iter++;
}
x_cur.copyTo(w);
}
/**
* By default the infinity norm is used to judge convergence. If set to
* {@code false}, the 2 norm will be used instead.
* @param inftNormCriterion {@code true} to use the infinity norm, {@code false} to use the 2 norm
*/
public void setInftNormCriterion(boolean inftNormCriterion)
{
this.inftNormCriterion = inftNormCriterion;
}
/**
* Returns whether or not the infinity norm ({@code true}) or 2 norm
* ({@code false}) is used to determine convergence.
* @return {@code true} if the infinity norm is in use, {@code false} for
* the 2 norm
*/
public boolean isInftNormCriterion()
{
return inftNormCriterion;
}
private double gradConvgHelper(Vec grad)
{
if(!inftNormCriterion)
return grad.pNorm(2);
double max = 0;
for(IndexValue iv : grad)
max = Math.max(max, Math.abs(iv.getValue()));
return max;
}
/**
* Sets the number of history items to keep that are used to approximate the
* Hessian of the problem
* @param m the number of history items to keep
*/
public void setM(int m)
{
if(m < 1)
throw new IllegalArgumentException("m must be positive, not " + m);
this.m = m;
}
/**
* Returns the number of history items that will be used
* @return the number of history items that will be used
*/
public int getM()
{
return m;
}
/**
* Sets the line search method used at each iteration
* @param lineSearch the line search method used at each iteration
*/
public void setLineSearch(LineSearch lineSearch)
{
this.lineSearch = lineSearch;
}
/**
* Returns the line search method used at each iteration
* @return the line search method used at each iteration
*/
public LineSearch getLineSearch()
{
return lineSearch;
}
@Override
public void setMaximumIterations(int iterations)
{
if(iterations < 1)
throw new IllegalArgumentException("Number of iterations must be positive, not " + iterations);
this.maxIterations = iterations;
}
@Override
public int getMaximumIterations()
{
return maxIterations;
}
@Override
public LBFGS clone()
{
return new LBFGS(m, maxIterations, lineSearch.clone());
}
}
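/*
 * Illustrative usage sketch (not part of the original file): minimizes a
 * 10-dimensional Rosenbrock function while keeping only 5 history pairs, so
 * each iteration costs O(m n) rather than the O(n^2) of full BFGS. DenseVector
 * comes from the jsat.linear.* import above; the zero start point is an
 * arbitrary example choice.
 */
class LBFGSExample
{
    public static void main(String[] args)
    {
        int n = 10;
        RosenbrockFunction f = new RosenbrockFunction();
        Vec x0 = new DenseVector(n);//starts at the zero vector
        Vec w = new DenseVector(n);//storage for the solution
        LBFGS lbfgs = new LBFGS(5);//m = 5 history items
        lbfgs.optimize(1e-6, w, x0, f, f.getDerivative(), false);
        System.out.println("solution (should be near all ones): " + w);
    }
}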
| 7,792 | 28.858238 | 149 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/LineSearch.java | package jsat.math.optimization;
import jsat.linear.Vec;
import jsat.math.Function;
import jsat.math.FunctionVec;
/**
* Line search defines a method of minimizing a function φ(α) =
* f(<b>x</b>+α <b>p</b>) where α > 0 is a scalar value, and
* <b>x</b> and <b>p</b> are fixed vectors. <br>
* <br>
* Different line search methods may or may not use all the input variables.<br>
* <br>
* The LineSearch is allowed to maintain a history of update values to use on
* future calls. For this reason, a {@link #clone() clone} of the line search
* should be used for each new optimization problem.
*
* @author Edward Raff
*/
public interface LineSearch
{
/**
* Attempts to find the value of α that minimizes
* f(<b>x</b>+α <b>p</b>)
*
* @param alpha_max the maximum value for α to search for
* @param x_k the initial value to search from
* @param x_grad the gradient of ∇ f(x<sub>k</sub>)
* @param p_k the direction update
* @param f the function to minimize the value of
* f(x<sub>k</sub> + α p<sub>k</sub>)
* @param fp the gradient of f, ∇f(x), may be {@code null} depending
* upon the linesearch method
* @param f_x the value of f(x<sub>k</sub>), or {@link Double#NaN} if it needs to be computed
* @param gradP the value of ∇f(x<sub>k</sub>)<sup>T</sup>p<sub>k</sub>,
* or {@link Double#NaN} if it needs to be computed
* @param x_alpha_pk the location to store the value of
* x<sub>k</sub> + α p<sub>k</sub>
* @param fxApRet an array to store the computed result of
* f(x<sub>k</sub> + α p<sub>k</sub>) in the first index.
* May be {@code null}, in which case the value will not be returned
* @param grad_x_alpha_pk location to store the value of ∇ f(x<sub>k</sub>+α p<sub>k</sub>). May be {@code null}, local storage will be allocated if needed
* @return the value of α that satisfies the line search in minimizing f(x<sub>k</sub> + α p<sub>k</sub>)
*/
default public double lineSearch(double alpha_max, Vec x_k, Vec x_grad, Vec p_k, Function f, FunctionVec fp, double f_x, double gradP, Vec x_alpha_pk, double[] fxApRet, Vec grad_x_alpha_pk)
{
return lineSearch(alpha_max, x_k, x_grad, p_k, f, fp, f_x, gradP, x_alpha_pk, fxApRet, grad_x_alpha_pk, false);
}
/**
* Attempts to find the value of α that minimizes
* f(<b>x</b>+α <b>p</b>)
*
* @param alpha_max the maximum value for α to search for
* @param x_k the initial value to search from
* @param x_grad the gradient of ∇ f(x<sub>k</sub>)
* @param p_k the direction update
* @param f the function to minimize the value of
* f(x<sub>k</sub> + α p<sub>k</sub>)
* @param fp the gradient of f, ∇f(x), may be {@code null} depending
* upon the line search method
* @param f_x the value of f(x<sub>k</sub>), or {@link Double#NaN} if it needs to be computed
* @param gradP the value of ∇f(x<sub>k</sub>)<sup>T</sup>p<sub>k</sub>,
* or {@link Double#NaN} if it needs to be computed
* @param x_alpha_pk the location to store the value of
* x<sub>k</sub> + α p<sub>k</sub>
* @param fxApRet an array to store the computed result of
* f(x<sub>k</sub> + α p<sub>k</sub>) in the first index.
* May be {@code null}, in which case the value will not be returned
* @param grad_x_alpha_pk location to store the value of ∇ f(x<sub>k</sub>+α p<sub>k</sub>). May be {@code null}, local storage will be allocated if needed
* @param parallel {@code true} if this line search should be done using multiple cores, or {@code false} to be single threaded.
* @return the value of α that satisfies the line search in minimizing f(x<sub>k</sub> + α p<sub>k</sub>)
*/
public double lineSearch(double alpha_max, Vec x_k, Vec x_grad, Vec p_k, Function f, FunctionVec fp, double f_x, double gradP, Vec x_alpha_pk, double[] fxApRet, Vec grad_x_alpha_pk, boolean parallel);
/**
* When performing the {@link #lineSearch(double, jsat.linear.Vec, jsat.linear.Vec, jsat.linear.Vec, jsat.math.Function, jsat.math.FunctionVec, double, double, jsat.linear.Vec, double[], jsat.linear.Vec) linear search}
* step some line searches may or may not use the gradient information. If
* the gradient information is used and updated, this method will return
* {@code true}. If not the given vector will be unused and not updated, and
* this method will return {@code false}
* @return {@code true} if the {@code grad_x_alpha_pk} parameter of
* lineSearch will be up-to-date after the call, or {@code false} if the
* gradient value will need to be computed after.
*/
public boolean updatesGrad();
/**
* Returns a clone of the line search object
* @return a clone of the line search object
*/
public LineSearch clone();
}
| 5,071 | 52.957447 | 222 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/ModifiedOWLQN.java | /*
* Copyright (C) 2015 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.math.optimization;
import java.util.ArrayList;
import java.util.List;
import jsat.linear.Vec;
import jsat.math.Function;
import jsat.math.FunctionVec;
import jsat.utils.DoubleList;
import static java.lang.Math.*;
import jsat.linear.ConstantVector;
import jsat.linear.IndexValue;
/**
* This implements the Modified Orthant-Wise Limited memory
* Quasi-Newton(mOWL-QN) optimizer. This algorithm is an extension of
* {@link LBFGS}, and solves minimization problems of the form: f(x) +
* {@link #setLambda(double) λ} ||x||<sub>1</sub>. It requires the
* function and it's gradient to work. <br>
* <br>
* See:<br>
* <ul>
* <li>Gong, P., & Ye, J. (2015). <i>A Modified Orthant-Wise Limited Memory
* Quasi-Newton Method with Convergence Analysis</i>. In The 32nd International
* Conference on Machine Learning (Vol. 37).</li>
* <li>Andrew, G., & Gao, J. (2007). <i>Scalable training of L1 -regularized
* log-linear models</i>. In Proceedings of the 24th international conference on
* Machine learning - ICML ’07 (pp. 33–40). New York, New York, USA: ACM Press.
* doi:10.1145/1273496.1273501</li>
* </ul>
*
*
* @author Edward Raff <[email protected]>
*/
public class ModifiedOWLQN implements Optimizer
{
private int m = 10;
private double lambda;
private Vec lambdaMultipler = null;
private static final double DEFAULT_EPS = 1e-12;
private static final double DEFAULT_ALPHA_0 = 1;
private static final double DEFAULT_BETA = 0.2;
private static final double DEFAULT_GAMMA = 1e-2;
private double eps = DEFAULT_EPS;
private double alpha_0 = DEFAULT_ALPHA_0;
private double beta = DEFAULT_BETA;
private double gamma = DEFAULT_GAMMA;
private int maxIterations = 500;
/**
* Creates a new mOWL-QN optimizer with no regularization penalty
*/
public ModifiedOWLQN()
{
this(0.0);
}
/**
* Creates a new mOWL-QN optimizer
* @param lambda the regularization penalty to use
*/
public ModifiedOWLQN(double lambda)
{
setLambda(lambda);
}
/**
* copy constructor
* @param toCopy the object to copy
*/
protected ModifiedOWLQN(ModifiedOWLQN toCopy)
{
this(toCopy.lambda);
if(toCopy.lambdaMultipler != null)
this.lambdaMultipler = toCopy.lambdaMultipler.clone();
this.eps = toCopy.eps;
this.m = toCopy.m;
this.alpha_0 = toCopy.alpha_0;
this.beta = toCopy.beta;
this.gamma = toCopy.gamma;
this.maxIterations = toCopy.maxIterations;
}
/**
* Sets the regularization term for the optimizer
* @param lambda the regularization penalty
*/
public void setLambda(double lambda)
{
if(lambda < 0 || Double.isInfinite(lambda) || Double.isNaN(lambda))
throw new IllegalArgumentException("lambda must be non-negative, not " + lambda);
this.lambda = lambda;
}
/**
* This method sets a vector that will contain a separate multiplier for
* {@link #setLambda(double) lambda} for each dimension of the problem. This
* allows for each dimension to have a different regularization penalty.<br>
* <br>
* If set to {@code null}, all dimensions will simply use λ as their
* regularization value.
*
* @param lambdaMultipler the per-dimension regularization multiplier, or {@code null}.
*/
public void setLambdaMultipler(Vec lambdaMultipler)
{
this.lambdaMultipler = lambdaMultipler;
}
public Vec getLambdaMultipler()
{
return lambdaMultipler;
}
/**
* Sets the number of history items to keep that are used to approximate the
* Hessian of the problem
*
* @param m the number of history items to keep
*/
public void setM(int m)
{
if (m < 1)
throw new IllegalArgumentException("m must be positive, not " + m);
this.m = m;
}
/**
* Returns the number of history items that will be used
*
* @return the number of history items that will be used
*/
public int getM()
{
return m;
}
/**
* Sets the epsilon term that helps control when the gradient descent step
* is taken instead of the normal Quasi-Newton step. Larger values cause
* more GD steps. You shouldn't need to alter this variable
* @param eps tolerance term for GD steps
*/
public void setEps(double eps)
{
if(eps < 0 || Double.isInfinite(eps) || Double.isNaN(eps))
throw new IllegalArgumentException("eps must be non-negative, not " + eps);
this.eps = eps;
}
public double getEps()
{
return eps;
}
/**
* Sets the shrinkage term used for the line search.
* @param beta the line search shrinkage term
*/
public void setBeta(double beta)
{
if(beta <= 0 || beta >= 1 || Double.isNaN(beta))
throw new IllegalArgumentException("shrinkage term must be in (0, 1), not " + beta);
this.beta = beta;
}
public double getBeta()
{
return beta;
}
@Override
public void optimize(double tolerance, Vec w, Vec x0, Function f, FunctionVec fp, boolean parallel)
{
if(fp == null)
fp = Function.forwardDifference(f);
//Algorithm 2 mOWL-QN: modified Orthant-Wise Limited memory Quasi-Newton
Vec lambdaMul = lambdaMultipler;
if(lambdaMultipler == null)
lambdaMul = new ConstantVector(1.0, x0.length());
Vec x_cur = x0.clone();
Vec x_grad = x0.clone();
Vec x_gradNext = x0.clone();
Vec x_grad_diff = x0.clone();
/**
* This value is where the pseudo-gradient ⋄f(x) lives
*/
Vec v_k = x0.clone();
Vec d_k = x0.clone();
Vec p_k = x0.clone();
Vec x_alpha = x0.clone();
/**
* Difference between x_alpha and x_cur
*/
Vec x_diff = x0.clone();
//history for implicit H
List<Double> Rho = new DoubleList(m);
List<Vec> S = new ArrayList<>(m);
List<Vec> Y = new ArrayList<>(m);
double[] alphas = new double[m];
double f_x = f.f(x_cur, parallel);
f_x += getL1Penalty(x_cur, lambdaMul);
x_grad = fp.f(x_cur, x_grad, parallel);
//2: for k = 0 to maxiter do
for(int k = 0; k < maxIterations; k++)
{
double v_k_norm = 0;
//3: Compute v_k ← -⋄f(x_k), the negative pseudo-gradient
for(int i = 0; i < x_grad.length(); i++)
{
double x_i = x_cur.get(i);
double l_i = x_grad.get(i);
double lambda_i = lambda*lambdaMul.get(i);
double newVal;
if(x_i > 0)
newVal = l_i+lambda_i;
else if(x_i < 0)
newVal = l_i-lambda_i;
else if(l_i+lambda_i < 0)//x_i == 0 is implicit
newVal = l_i+lambda_i;
else if(l_i-lambda_i > 0)//x_i == 0 is implicit
newVal = l_i-lambda_i;
else
newVal = 0;
v_k.set(i, -newVal);
v_k_norm += newVal*newVal;
}
v_k_norm = Math.sqrt(v_k_norm);
//I_k = {i ∈ {1, · · · ,n} : 0 < |x^k_i| ≤ ϵ_k, x^k_i v^k_i < 0}, where ϵ_k = min(∥v^k∥, ϵ);
//we only really need to know if the set I_k is empty or not, the indicies are never used
double eps_k = Math.min(v_k_norm, eps);
boolean doGDstep = false;
for(int i = 0; i < v_k.length() && !doGDstep; i++)
{
double x_i = x_cur.get(i);
double v_i = v_k.get(i);
boolean isInI = 0 < abs(x_i) && abs(x_i) < eps_k && x_i*v_i < 0;
if(isInI)
doGDstep = true;
}
//5: Initialize α←α0;
double alpha = alpha_0;
double f_x_alpha = 0;//objective value for new x
if(!doGDstep)//6:if Ik = ∅ then (QN-step)
{
//8: Compute dk ←Hkvk using L-BFGS with S, Y ;
LBFGS.twoLoopHp(v_k, Rho, S, Y, d_k, alphas);
//9: Alignment: pk ←π(dk;vk);
for (int i = 0; i < p_k.length(); i++)
if (Math.signum(d_k.get(i)) == Math.signum(v_k.get(i)))
p_k.set(i, d_k.get(i));
else
p_k.set(i, 0.0);
//10: while Eq. (7) is not satisfied do
double rightSideMainTerm = gamma*v_k.dot(d_k);
alpha/=beta;//so when we multiply below we get the correct starting value
do
{
//11: α←αβ;
alpha *= beta;
//12: x^k(α)←π(x^k +α p^k; ξ^k);
x_cur.copyTo(x_alpha);
x_alpha.mutableSubtract(-alpha, p_k);
//projection step
for (int i = 0; i < p_k.length(); i++)
{
double x_i = x_cur.get(i);
double v_i = v_k.get(i);
double toUse = x_i != 0 ? x_i : v_i;
if (Math.signum(x_alpha.get(i)) != Math.signum(toUse))
x_alpha.set(i, 0.0);
}
f_x_alpha = f.f(x_alpha, parallel);
f_x_alpha += getL1Penalty(x_alpha, lambdaMul);
}
while(f_x_alpha > f_x - alpha*rightSideMainTerm );
x_alpha.copyTo(x_diff);
x_diff.mutableSubtract(x_cur);
}
else//(GD-step)
{
alpha/=beta;
do
{
alpha*=beta;
/*
* see section 2.3 of below to solve problem
* Gong, P., Zhang, C., Lu, Z., Huang, J., & Ye, J. (2013).
* A general iterative shrinkage and thresholding algorithm
* for non-convex regularized optimization problems.
* International Conference on Machine Learning, 28, 37–45.
* Retrieved from http://arxiv.org/abs/1303.4434
*
*/
//first use def u(k) = w(k) − ∇l(w)/t , where t = 1/alpha
x_grad.copyTo(x_alpha);
x_alpha.mutableMultiply(-alpha);
x_alpha.mutableAdd(x_cur);
//x_alpha now has the value of u(k)
//we can now modify it into the correct solution using
//w^(k+1) = sign(u)max(0, |u|−λ/t)
for(int i = 0; i < x_alpha.length(); i++)
{
final double u_i = x_alpha.get(i);
final double lambda_i = lambda*lambdaMul.get(i);
x_alpha.set(i, signum(u_i)*max(0, abs(u_i)-lambda_i*alpha));
}
x_alpha.copyTo(x_diff);
x_diff.mutableSubtract(x_cur);
f_x_alpha = f.f(x_alpha, parallel);
f_x_alpha += getL1Penalty(x_alpha, lambdaMul);
}
while(f_x_alpha > f_x - gamma/(2*alpha)*x_diff.dot(x_diff));//eq(8) f(x^k(α)) ≤ f(x^k)− γ/(2α) || x^k(α)−x^k||^2
}
//update history
S.add(0, x_diff.clone());
x_gradNext = fp.f(x_alpha, x_gradNext, parallel);
//convergence check
double maxGrad = 0;
for(int i = 0; i < x_gradNext.length(); i++)
maxGrad = max(maxGrad, abs(x_gradNext.get(i)));
if(maxGrad < tolerance || f_x < tolerance || x_diff.pNorm(1) < tolerance )
break;
x_gradNext.copyTo(x_grad_diff);
x_grad_diff.mutableSubtract(x_grad);
Y.add(0, x_grad_diff.clone());
Rho.add(0, 1/x_diff.dot(x_grad_diff));
if(Double.isInfinite(Rho.get(0)) || Double.isNaN(Rho.get(0)))
{
Rho.clear();
S.clear();
Y.clear();
}
while(Rho.size() > m)
{
Rho.remove(m);
S.remove(m);
Y.remove(m);
}
//prep for the next iteration
f_x = f_x_alpha;
x_alpha.copyTo(x_cur);
x_gradNext.copyTo(x_grad);
}
x_cur.copyTo(w);
}
private double getL1Penalty(Vec w, Vec lambdaMul)
{
if(lambda <= 0)
return 0;
double pen = 0;
for(IndexValue iv : w)
pen += lambda*lambdaMul.get(iv.getIndex())*abs(iv.getValue());
return pen;
}
@Override
public void setMaximumIterations(int iterations)
{
if(iterations < 1)
throw new IllegalArgumentException("Number of iterations must be positive, not " + iterations);
this.maxIterations = iterations;
}
@Override
public int getMaximumIterations()
{
return maxIterations;
}
@Override
public ModifiedOWLQN clone()
{
return new ModifiedOWLQN(this);
}
}
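/*
 * Illustrative usage sketch (not part of the original file): minimizes the
 * Rosenbrock function plus an L1 penalty of lambda * ||x||_1, which encourages
 * small coordinates to be driven to exactly zero. The lambda value of 0.05 is
 * an arbitrary example; DenseVector is referenced by its fully qualified name
 * since it is not imported above.
 */
class ModifiedOWLQNExample
{
    public static void main(String[] args)
    {
        RosenbrockFunction f = new RosenbrockFunction();
        Vec x0 = new jsat.linear.DenseVector(new double[]{-1.2, 1.0});
        Vec w = new jsat.linear.DenseVector(2);
        ModifiedOWLQN owlqn = new ModifiedOWLQN(0.05);//lambda = 0.05
        owlqn.optimize(1e-6, w, x0, f, f.getDerivative(), false);
        System.out.println("L1-regularized solution: " + w);
    }
}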
| 14,517 | 32.841492 | 128 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/NelderMead.java |
package jsat.math.optimization;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.math.Function;
import jsat.math.FunctionVec;
import jsat.utils.ProbailityMatch;
/**
* The Nelder-Mead algorithm is a simple directed search method. As such, it does not need any information about
* the target function's derivative, or any data points. To perform best, the Nelder-Mead method needs N+1
* reasonable initial guesses for an N dimensional problem. <br>
* The Nelder-Mead method has the advantage that the only information it needs about the function it is going to minimize is the function itself.
*
* @author Edward Raff
*/
public class NelderMead implements Optimizer
{
private static final long serialVersionUID = -2930235371787386607L;
/**
* Reflection constant
*/
private double reflection = 1.0;
/**
* Expansion constant
*/
private double expansion = 2.0;
/**
* Contraction constant
*/
private double contraction = -0.5;
/**
* Shrink constant
*/
private double shrink = 0.5;
private int maxIterations = 1000;
public NelderMead() {
}
public NelderMead(NelderMead toCopy)
{
this.reflection = toCopy.reflection;
this.expansion = toCopy.expansion;
this.contraction = toCopy.contraction;
this.shrink = toCopy.shrink;
this.maxIterations = toCopy.maxIterations;
}
/**
* Sets the reflection constant, which must be greater than 0
* @param reflection the reflection constant
*/
public void setReflection(double reflection)
{
if(reflection <=0 || Double.isNaN(reflection) || Double.isInfinite(reflection) )
throw new ArithmeticException("Reflection constant must be > 0, not " + reflection);
this.reflection = reflection;
}
/**
* Sets the expansion constant, which must be greater than 1 and the reflection constant
* @param expansion the expansion constant
*/
public void setExpansion(double expansion)
{
if(expansion <= 1 || Double.isNaN(expansion) || Double.isInfinite(expansion) )
throw new ArithmeticException("Expansion constant must be > 1, not " + expansion);
else if(expansion <= reflection)
throw new ArithmeticException("Expansion constant must be less than the reflection constant");
this.expansion = expansion;
}
/**
* Sets the contraction constant, which must be in the range (0, 1)
* @param contraction the contraction constant
*/
public void setContraction(double contraction)
{
if(contraction >= 1 || contraction <= 0 || Double.isNaN(contraction) || Double.isInfinite(contraction) )
throw new ArithmeticException("Contraction constant must be > 0 and < 1, not " + contraction);
this.contraction = contraction;
}
/**
* Sets the shrinkage constant, which must be in the range (0, 1)
* @param shrink the shrinkage constant
*/
public void setShrink(double shrink)
{
if(shrink >= 1 || shrink <= 0 || Double.isNaN(shrink) || Double.isInfinite(shrink) )
throw new ArithmeticException("Shrinkage constant must be > 0 and < 1, not " + shrink);
this.shrink = shrink;
}
@Override
public void optimize(double tolerance, Vec w, Vec x0, Function f, FunctionVec fp, boolean parallel)
{
List<Vec> initialPoints = new ArrayList<>();
initialPoints.add(x0.clone());
optimize(tolerance, maxIterations, f, initialPoints, parallel).copyTo(w);
}
/**
* Attempts to find the minimal value of the given function.
*
* @param eps the desired accuracy of the result.
* @param iterationLimit the maximum number of iteration steps to allow. This value must be positive
* @param f the function to optimize. This value can not be null
* @param initalPoints the list of initial guess points. If too small, new ones will be generated. if too large,
* the extra ones will be ignored. This list may not be empty
* @param parallel {@code true} if multiple threads should be used for
* optimization, or {@code false} if a single thread should be used.
* @return the computed value for the optimization.
*/
public Vec optimize(double eps, int iterationLimit, Function f, List<Vec> initalPoints, boolean parallel)
{
if(initalPoints.isEmpty())
throw new ArithmeticException("Empty Initial list. Can not determin dimension of problem");
Vec init = initalPoints.get(0);
int N = initalPoints.get(0).length();
//The simplex vertices paired with their value from the objective function
List<ProbailityMatch<Vec>> simplex = new ArrayList<>(N);
for(Vec vars : initalPoints)
simplex.add(new ProbailityMatch<>(f.f(vars, parallel), vars.clone()));
Random rand = new Random(initalPoints.hashCode());
while(simplex.size() < N+1)
{
//Better simplex generation?
DenseVector newSimplex = new DenseVector(N);
for(int i = 0; i < newSimplex.length(); i++)
if(init.get(i) != 0)
newSimplex.set(i, init.get(i)*rand.nextGaussian());
else
newSimplex.set(i, rand.nextGaussian());
simplex.add(new ProbailityMatch<>(f.f(newSimplex, parallel), newSimplex));
}
Collections.sort(simplex);
//Remove superfluously given points
while(simplex.size() > N+1)
simplex.remove(simplex.size()-1);
//Center of gravity point
Vec x0 = new DenseVector(N);
//reflection point
Vec xr = new DenseVector(N);
//Extension point, also used for contraction
Vec xec = new DenseVector(N);
//Temp space for compuations
Vec tmp = new DenseVector(N);
final int lastIndex = simplex.size()-1;
for(int iterationCount = 0; iterationCount < iterationLimit; iterationCount++)
{
//Convergence check
if(Math.abs(simplex.get(lastIndex).getProbability() - simplex.get(0).getProbability()) < eps)
break;
//Step 2: calculate x0
x0.zeroOut();
for(ProbailityMatch<Vec> pm : simplex)
x0.mutableAdd(pm.getMatch());
x0.mutableDivide(simplex.size());
//Step 3: Reflection
x0.copyTo(xr);
x0.copyTo(tmp);
tmp.mutableSubtract(simplex.get(lastIndex).getMatch());
xr.mutableAdd(reflection, tmp);
double fxr = f.f(xr);
if(simplex.get(0).getProbability() <= fxr && fxr < simplex.get(lastIndex-1).getProbability())
{
insertIntoSimplex(simplex, xr, fxr);
continue;
}
//Step 4: Expansion
if(fxr < simplex.get(0).getProbability())//Best so far
{
x0.copyTo(xec);
xec.mutableAdd(expansion, tmp);//tmp still contains (x0-xWorst)
double fxec = f.f(xec);
if(fxec < fxr)
insertIntoSimplex(simplex, xec, fxec);//Even better! Use this one
else
insertIntoSimplex(simplex, xr, fxr);//Ehh, wasn't as good as we thought
continue;
}
//Step 5: Contraction
x0.copyTo(xec);
xec.mutableAdd(contraction, tmp);
double fxec = f.f(xec);
if(fxec < simplex.get(lastIndex).getProbability())
{
insertIntoSimplex(simplex, xec, fxec);
continue;
}
//Step 6: Reduction
Vec xBest = simplex.get(0).getMatch();
for(int i = 1; i < simplex.size(); i++)
{
ProbailityMatch<Vec> pm = simplex.get(i);
Vec xi = pm.getMatch();
xi.mutableSubtract(xBest);
xi.mutableMultiply(shrink);
xi.mutableAdd(xBest);
pm.setProbability(f.f(xi));
}
Collections.sort(simplex);
}
return simplex.get(0).getMatch();
}
private static void insertIntoSimplex(List<ProbailityMatch<Vec>> simplex, Vec x, double fx)
{
//We are removing the last element and inserting a new one that is better
ProbailityMatch<Vec> pm = simplex.remove(simplex.size() - 1);
pm.setProbability(fx);
x.copyTo(pm.getMatch());
//Now put it in the correct place
int sortInto = Collections.binarySearch(simplex, pm);
if (sortInto >= 0)
simplex.add(sortInto, pm);
else
{
sortInto = -(sortInto)-1;
if(sortInto == simplex.size())//Then it was just better than the last
simplex.add(pm);
else//It was a bit better than that
simplex.add(sortInto, pm);
}
}
@Override
public void setMaximumIterations(int iterations)
{
if(iterations <= 0)
throw new IllegalArgumentException("Number of iterations must be positive, not " + iterations);
this.maxIterations = iterations;
}
@Override
public int getMaximumIterations()
{
return maxIterations;
}
@Override
public NelderMead clone() {
return new NelderMead(this);
}
}
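/*
 * Illustrative usage sketch (not part of the original file): minimizes the
 * Rosenbrock function using only function evaluations. Since Nelder-Mead is
 * derivative free, null is passed for the gradient argument (the optimize
 * method above never uses it). The starting point is an arbitrary example.
 */
class NelderMeadExample
{
    public static void main(String[] args)
    {
        RosenbrockFunction f = new RosenbrockFunction();
        Vec x0 = new DenseVector(new double[]{-1.2, 1.0});
        Vec w = new DenseVector(2);
        NelderMead nm = new NelderMead();
        nm.optimize(1e-9, w, x0, f, null, false);
        System.out.println("solution (should be near [1, 1]): " + w);
    }
}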
| 9,693 | 34.903704 | 147 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/Optimizer.java | package jsat.math.optimization;
import jsat.linear.Vec;
import jsat.math.Function;
import jsat.math.FunctionVec;
/**
* This interface defines a contract for multivariate function minimization.<br>
* <br>
* Different optimization methods will use or require different amounts of
* information. Depending on the optimizer, the 1st derivative may not be
* necessary and can be {@code null}.
*
* @author Edward Raff
*/
public interface Optimizer
{
/**
* Attempts to optimize the given function by finding the value of {@code w}
* that will minimize the value returned by {@code f(w)}, using
* <i>w = x<sub>0</sub></i> as an initial starting point.
*
* @param tolerance the value that the gradient norm must be less than to
* consider converged
* @param w the location to store the final solution
* @param x0 the initial guess for the solution. This value will not be
* changed, and intermediate matrices will be created as the same type.
* @param f the objective function to minimize
* @param fp the derivative of the objective function, may be {@code null}
* depending on the optimizer
*/
default public void optimize(double tolerance, Vec w, Vec x0, Function f, FunctionVec fp)
{
optimize(tolerance, w, x0, f, fp, false);
}
/**
* Attempts to optimize the given function by finding the value of {@code w}
* that will minimize the value returned by {@code f(w)}, using
* <i>w = x<sub>0</sub></i> as an initial starting point.
*
* @param tolerance the value that the gradient norm must be less than to
* consider converged
* @param w the location to store the final solution
* @param x0 the initial guess for the solution. This value will not be
* changed, and intermediate matrices will be created as the same type.
* @param f the objective function to minimize
* @param fp the derivative of the objective function, may be {@code null}
* depending on the optimizer
* @param parallel {@code true} if multiple threads should be used for
* optimization, or {@code false} if a single thread should be used.
*/
public void optimize(double tolerance, Vec w, Vec x0, Function f, FunctionVec fp, boolean parallel);
/**
* Sets the maximum number of iterations allowed for the optimization method
* @param iterations the maximum number of iterations to perform
*/
public void setMaximumIterations(int iterations);
/**
* Returns the maximum number of iterations to perform
* @return the maximum number of iterations to perform
*/
public int getMaximumIterations();
public Optimizer clone();
}
| 2,754 | 38.927536 | 104 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/RosenbrockFunction.java |
package jsat.math.optimization;
import jsat.linear.DenseVector;
import jsat.linear.Vec;
import jsat.math.Function;
import static java.lang.Math.*;
import java.util.concurrent.ExecutorService;
import jsat.math.FunctionVec;
/**
* The Rosenbrock function is a function with at least one minimum with the value zero. It is often used as a benchmark for optimization problems. <br>
* The minimum is the vector of all ones. Once N &gt; 3, more than one minimum can occur.
*
* @author Edward Raff
*/
public class RosenbrockFunction implements Function
{
private static final long serialVersionUID = -5573482950045304948L;
@Override
public double f(Vec x, boolean parallel)
{
int N = x.length();
double f = 0.0;
for(int i = 1; i < N; i++)
{
double x_p = x.get(i-1);
double xi = x.get(i);
f += pow(1.0-x_p, 2)+100.0*pow(xi-x_p*x_p, 2);
}
return f;
}
/**
* Returns the gradient of the Rosenbrock function
* @return the gradient of the Rosenbrock function
*/
public FunctionVec getDerivative()
{
return GRADIENT;
}
/**
* The gradient of the Rosenbrock function
*/
public static final FunctionVec GRADIENT = new FunctionVec()
{
@Override
public Vec f(Vec x, Vec drv, boolean parallel)
{
int N = x.length();
if (drv == null)
drv = x.clone();
drv.zeroOut();
drv.set(0, -400 * x.get(0) * (x.get(1) - pow(x.get(0), 2)) - 2 * (1 - x.get(0)));
for (int i = 1; i < N - 1; i++)
{
double x_p = x.get(i - 1);
double x_i = x.get(i);
double x_n = x.get(i + 1);
drv.set(i, 200 * (x_i - x_p * x_p) - 400 * x_i * (x_n - x_i * x_i) - 2 * (1 - x_i));
}
drv.set(N - 1, 200 * (x.get(N - 1) - pow(x.get(N - 2), 2)));
return drv;
}
};
}
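/*
 * Illustrative usage sketch (not part of the original file): evaluates the
 * function and its gradient at the all-ones vector, the known minimum, where
 * both should be numerically zero.
 */
class RosenbrockFunctionExample
{
    public static void main(String[] args)
    {
        RosenbrockFunction f = new RosenbrockFunction();
        Vec ones = new DenseVector(new double[]{1, 1, 1, 1});
        System.out.println("f(1,1,1,1) = " + f.f(ones, false));
        System.out.println("gradient   = " + f.getDerivative().f(ones, null, false));
    }
}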
| 2,026 | 25.671053 | 150 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/WolfeNWLineSearch.java | package jsat.math.optimization;
import jsat.linear.Vec;
import jsat.math.Function;
import jsat.math.FunctionVec;
import static java.lang.Math.*;
/**
* An implementation of the Wolfe Line Search algorithm described by Nocedal and
* Wright in <i>Numerical Optimization</i> (2nd edition) on pages 59-63.
*
* @author Edward Raff
*/
public class WolfeNWLineSearch implements LineSearch
{
//default values that make setting in the constructor simple (shouldn't actually use)
private double c1 = Math.nextUp(0), c2 = Math.nextAfter(1, Double.NEGATIVE_INFINITY);
/**
* Creates a new Wolfe line search with {@link #setC1(double) } set to
* {@code 1e-4} and {@link #setC2(double) } to {@code 0.9}
*/
public WolfeNWLineSearch()
{
this(1e-4, 0.9);
}
/**
* Creates a new Wolfe line search
* @param c1 the <i>sufficient decrease condition</i> constant
* @param c2 the <i>curvature condition</i> constant
*/
public WolfeNWLineSearch(double c1, double c2)
{
setC1(c1);
setC2(c2);
}
private AlphaInit initMethod = AlphaInit.METHOD1;
double alpha_prev = -1, f_x_prev = Double.NaN, gradP_prev = Double.NaN;
public enum AlphaInit
{
/**
* Initializes the new α value via α<sub>prev</sub>
* ∇f(x<sub>prev</sub>)<sup>T</sup>p<sub>prev</sub>/
* ∇f(x<sub>cur</sub>)<sup>T</sup>p<sub>cur</sub>
*/
METHOD1,
/**
* Initializes the new α value via
* 2( f(x<sub>cur</sub>)-f(x<sub>prev</sub>))/φ'(0)
*/
METHOD2
}
/**
* Sets the constant used for the <i>sufficient decrease condition</i>
* f(x+α p) ≤ f(x) + c<sub>1</sub> α p<sup>T</sup>∇f(x)
* <br>
* <br>
* This value must always be less than {@link #setC2(double) }
* @param c1 the <i>sufficient decrease condition</i>
*/
public void setC1(double c1)
{
if(c1 <= 0)
throw new IllegalArgumentException("c1 must be greater than 0, not " + c1);
else if(c1 >= c2)
throw new IllegalArgumentException("c1 must be less than c2");
this.c1 = c1;
}
/**
* Returns the <i>sufficient decrease condition</i> constant
* @return the <i>sufficient decrease condition</i> constant
*/
public double getC1()
{
return c1;
}
/**
* Sets the constant used for the <i>curvature condition</i>
* p<sup>T</sup> ∇f(x+α p) ≥ c<sub>2</sub> p<sup>T</sup>∇f(x)
* @param c2 the <i>curvature condition</i> constant
*/
public void setC2(double c2)
{
if(c2 >= 1)
throw new IllegalArgumentException("c2 must be less than 1, not " + c2);
else if(c2 <= c1)
throw new IllegalArgumentException("c2 must be greater than c1");
this.c2 = c2;
}
/**
* Returns the <i>curvature condition</i> constant
* @return the <i>curvature condition</i> constant
*/
public double getC2()
{
return c2;
}
@Override
public double lineSearch(double alpha_max, Vec x_k, Vec x_grad, Vec p_k, Function f, FunctionVec fp, double f_x, double gradP, Vec x_alpha_pk, double[] fxApRet, Vec grad_x_alpha_pk, boolean parallel)
{
if(Double.isNaN(f_x))
f_x = f.f(x_k, parallel);
if(Double.isNaN(gradP))
gradP = x_grad.dot(p_k);
final double phi0 = f_x, phi0P = gradP;
double alpha_cur = 1;
if(!Double.isNaN(gradP_prev) && initMethod == AlphaInit.METHOD1)
{
alpha_cur = alpha_prev*gradP_prev/gradP;
}
else if(!Double.isNaN(f_x_prev) && initMethod == AlphaInit.METHOD2)
{
alpha_cur = 2*(f_x-f_x_prev)/phi0P;
alpha_cur = min(1, 1.01*(alpha_cur));
}
alpha_cur = max(alpha_cur, 1e-13);
//2.5.13 from OPTIMIZATION THEORY AND METHODS Nonlinear Programming
alpha_prev = 0;
double phi_prev = phi0;
double phi_prevP = phi0P;
double valToUse = 0;
x_k.copyTo(x_alpha_pk);
for(int iter = 1; iter <= 10 && valToUse == 0; iter++)
{
//Evaluate φ(αi );
x_alpha_pk.mutableAdd(alpha_cur-alpha_prev, p_k);
double phi_cur = f.f(x_alpha_pk, parallel);
if(fxApRet != null)
fxApRet[0] = phi_cur;
double phi_curP = fp.f(x_alpha_pk, grad_x_alpha_pk, parallel).dot(p_k);//computed early b/c used in interpolation in zoom
//if φ(αi)>φ(0)+c1 αi φ'(0) or[φ(αi)≥φ(αi−1) and i >1]
if(phi_cur > phi0 + c1*alpha_cur*phi0P || (phi_cur >= phi_prev && iter > 1) )
{
//α∗ ←zoom(αi−1,αi) and stop;
valToUse = zoom(alpha_prev, alpha_cur, phi_prev, phi_cur, phi_prevP, phi_curP, phi0, phi0P, x_k, x_alpha_pk, p_k, f, fp, fxApRet, grad_x_alpha_pk, parallel);
break;
}
//Evaluate φ'(αi );
//if |φ'(αi )| ≤ −c2φ'(0)
if(abs(phi_curP) <= -c2*phi0P)
{
valToUse = alpha_cur;//set α∗ ← αi and stop;
break;
}
//if φ'(αi ) ≥ 0
if(phi_curP >= 0)
{
//set α∗ ←zoom(αi,αi−1) and stop;
valToUse = zoom(alpha_cur, alpha_prev, phi_cur, phi_prev, phi_curP, phi_prevP, phi0, phi0P, x_k, x_alpha_pk, p_k, f, fp, fxApRet, grad_x_alpha_pk, parallel);
break;
}
//Choose αi+1 ∈(αi,αmax);
            //here we simply double the step size each iteration
alpha_prev = alpha_cur;
phi_prev = phi_cur;
phi_prevP = phi_curP;
alpha_cur *= 2;
if(alpha_cur >= alpha_max)//hit the limit
{
valToUse = alpha_max;
break;
}
}
alpha_prev = valToUse;
f_x_prev = f_x;
gradP_prev = gradP;
return valToUse;
}
/**
*
*
* @param alphaLow the value of alphaLow
* @param alphaHi the value of alphaHi
* @param phi_alphaLow the value of phi_alphaLow
* @param phi_alphaHigh the value of phi_alphaHigh
* @param phi_alphaLowP the value of phi_alphaLowP
* @param phi_alphaHighP the value of phi_alphaHighP
* @param phi0 the value of phi0
* @param phi0P the value of phi0P
* @param x the value of x
* @param x_alpha_p the value of x_alpha_p
* @param p the value of p
* @param f the value of f
* @param fp the value of fp
* @param fxApRet the value of fxApRet
* @param grad_x_alpha_pk the value of grad_x_alpha_pk
* @param parallel
* @return the double
*/
private double zoom(double alphaLow, double alphaHi, double phi_alphaLow, double phi_alphaHigh, double phi_alphaLowP, double phi_alphaHighP, double phi0, double phi0P, Vec x, Vec x_alpha_p, Vec p, Function f, FunctionVec fp, double[] fxApRet, Vec grad_x_alpha_pk, boolean parallel)
{
double alpha_j = alphaLow;
for(int iter = 0; iter < 10; iter++)
{
//try cubic interp eq (3.59)
{
double d1 = phi_alphaLowP+phi_alphaHighP-3*(phi_alphaLow-phi_alphaHigh)/(alphaLow-alphaHi);
double d2 = signum(alphaHi-alphaLow)*pow(d1*d1-phi_alphaLowP*phi_alphaHighP, 0.5);
alpha_j = alphaHi-(alphaHi-alphaLow)*(phi_alphaHighP+d2-d1)/(phi_alphaHighP-phi_alphaLowP+2*d2);
}
//check if we were too close to the edge
if(alpha_j-(alphaHi-alphaLow)/2*0.1 < alphaLow || alpha_j > alphaHi*0.9)
alpha_j = min(alphaLow, alphaHi) + abs(alphaHi-alphaLow)/2;
x.copyTo(x_alpha_p);
x_alpha_p.mutableAdd(alpha_j, p);
//Evaluate φ(αj );
double phi_j = f.f(x_alpha_p, parallel);
if(fxApRet != null)
fxApRet[0] = phi_j;
double phi_jP = fp.f(x_alpha_p, grad_x_alpha_pk, parallel).dot(p);//computed early
//if φ(αj ) > φ(0) + c1αj φ'(0) or φ(αj ) ≥ φ(αlo)
            if(phi_j > phi0 + c1*alpha_j*phi0P || phi_j >= phi_alphaLow)
{
//αhi ←αj;
alphaHi = alpha_j;
phi_alphaHigh = phi_j;
phi_alphaHighP = phi_jP;
}
else
{
//Evaluate φ'(αj );
//if |φ'(αj )| ≤ −c2φ'(0)
                if(abs(phi_jP) <= -c2*phi0P)
return alpha_j;//Set α∗ ← αj and stop;
//if φ'(αj)(αhi −αlo)≥0
if(phi_jP*(alphaHi-alphaLow) >= 0)
{
//αhi ← αlo;
alphaHi = alphaLow;
phi_alphaHigh = phi_alphaLow;
phi_alphaHighP = phi_alphaLowP;
}
//αlo ←αj;
alphaLow = alpha_j;
phi_alphaLow = phi_j;
phi_alphaLowP = phi_jP;
}
}
return alpha_j;
}
@Override
public boolean updatesGrad()
{
return true;
}
@Override
public WolfeNWLineSearch clone()
{
WolfeNWLineSearch clone = new WolfeNWLineSearch(c1, c2);
clone.initMethod = this.initMethod;
clone.alpha_prev = this.alpha_prev;
clone.f_x_prev = this.f_x_prev;
clone.gradP_prev = this.gradP_prev;
return clone;
}
}
| 9,716 | 33.457447 | 285 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/oned/GoldenSearch.java | /*
* Copyright (C) 2015 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.math.optimization.oned;
import jsat.math.Function1D;
/**
* The class provides an implementation of the Golden Search method of function
* minimization.
*
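 * <br><br>
 * A minimal usage sketch (illustrative; assumes {@link Function1D} can be
 * supplied as a lambda):
 * <pre>{@code
 * // minimize (x-2)^2 over [0, 5]; the true minimizer is x = 2
 * double xMin = GoldenSearch.findMin(0, 5, x -> (x - 2) * (x - 2), 1e-6, 1000);
 * }</pre>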
* @author Edward Raff <[email protected]>
*/
public class GoldenSearch
{
private static final double goldenRatio = (Math.sqrt(5) - 1) / 2;
/**
* Attempts to numerically find the value {@code x} that minimizes the one
* dimensional function {@code f(x)} in the range {@code [min, max]}.
*
* @param min the minimum of the search range
* @param max the maximum of the search range
* @param f the one dimensional function to minimize
* @param eps the desired accuracy of the returned value
* @param maxSteps the maximum number of search steps to take
* @return the value {@code x} that appears to minimize {@code f(x)}
*/
public static double findMin(double min, double max, Function1D f, double eps, int maxSteps)
{
double a = min, b = max;
double fa = f.f(a), fb = f.f(b);
double c = b - goldenRatio * (b - a);
double d = a + goldenRatio * (b - a);
double fc = f.f(c);
double fd = f.f(d);
while(Math.abs(c-d) > eps && maxSteps-- > 0)
{
if (fc < fd)
{
// (b, f(b)) ← (d, f(d))
b = d;
fb = fd;
//(d, f(d)) ← (c, f(c))
d = c;
fd = fc;
// update c = b + φ (a - b) and f(c)
c = b - goldenRatio * (b - a);
fc = f.f(c);
}
else
{
//(a, f(a)) ← (c, f(c))
a = c;
fa = fc;
//(c, f(c)) ← (d, f(d))
c = d;
fc = fd;
// update d = a + φ (b - a) and f(d)
d = a + goldenRatio * (b - a);
fd = f.f(d);
}
}
return (a+b)/2;
}
}
| 2,734 | 31.951807 | 96 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/stochastic/AdaDelta.java | package jsat.math.optimization.stochastic;
import jsat.linear.DenseVector;
import jsat.linear.IndexValue;
import jsat.linear.ScaledVector;
import jsat.linear.Vec;
/**
* AdaDelta is inspired by {@link AdaGrad} and was developed for use primarily
* in neural networks. It still maintains a per feature learning rate, however
* unlike AdaGrad the learning rates may increase over time and are highly
* robust to any individual learning rate. <br>
* <br>
* See: Zeiler, M. D. (2012). <i>ADADELTA: An Adaptive Learning Rate Method</i>.
* CoRR, abs/1212.5.
*
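 * <br><br>
 * A minimal usage sketch (illustrative; {@code w}, {@code grad}, and {@code d}
 * are hypothetical and would be supplied by the learner using this updater):
 * <pre>{@code
 * AdaDelta updater = new AdaDelta(0.95);
 * updater.setup(d);              // d = dimension of the weight vector
 * updater.update(w, grad, 1.0);  // an eta of 1.0 lets AdaDelta's own scaling dominate
 * }</pre>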
* @author Edward Raff
*/
public class AdaDelta implements GradientUpdater
{
private static final long serialVersionUID = 5855631993426837618L;
private double rho;
private Vec gSqrd;
private Vec deltaXSqrt;
private double biasGSqrd;
private double deltaBiasSqrt;
private double eps = 0.0001;
/**
* Creates a new AdaDelta updater using a decay rate of 0.95
*/
public AdaDelta()
{
this(0.95);
}
/**
* Creates a new AdaDelta updater
* @param rho the decay rate to use
*/
public AdaDelta(double rho)
{
setRho(rho);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public AdaDelta(AdaDelta toCopy)
{
this.rho = toCopy.rho;
if(toCopy.gSqrd != null)
{
this.gSqrd = toCopy.gSqrd.clone();
this.deltaXSqrt = toCopy.deltaXSqrt.clone();
}
this.biasGSqrd = toCopy.biasGSqrd;
this.deltaBiasSqrt = toCopy.deltaBiasSqrt;
}
/**
* Sets the decay rate used by AdaDelta. Lower values focus more on the
* current gradient, where higher values incorporate a longer history.
*
* @param rho the decay rate in (0, 1) to use
*/
public void setRho(double rho)
{
if(rho <= 0 || rho >= 1 || Double.isNaN(rho))
throw new IllegalArgumentException("Rho must be in (0, 1)");
this.rho = rho;
}
/**
*
* @return the decay rate that will be used
*/
public double getRho()
{
return rho;
}
@Override
public void update(Vec x, Vec grad, double eta)
{
update(x, grad, eta, 0, 0);
}
@Override
public double update(Vec x, Vec grad, double eta, double bias, double biasGrad)
{
gSqrd.mutableMultiply(rho);
biasGSqrd *= rho;
for(IndexValue iv : grad)
{
final int indx = iv.getIndex();
final double grad_i = iv.getValue();
gSqrd.increment(indx, grad_i*grad_i*(1-rho));//step 4
final double gSqrd_i = gSqrd.get(indx);
final double deltaX_i = deltaXSqrt.get(indx);
final double newDeltaX_i = -Math.sqrt((deltaX_i+eps)/(gSqrd_i+eps))*grad_i;//step 5
x.increment(indx, eta*newDeltaX_i);//step 7
deltaXSqrt.increment(indx, (1-rho)/rho*newDeltaX_i*newDeltaX_i);//step 6, using (1-rho)/rho so we can multiply by rho at the end to get the correct result
}
//step 6 correction, apply rho to the left hand side
deltaXSqrt.mutableMultiply(rho);
//bias term
biasGSqrd += biasGrad*biasGrad*(1-rho);
double newDeltaBias = Math.sqrt((deltaBiasSqrt+eps)/(biasGSqrd+eps))*biasGrad;
double biasUpdate = eta*newDeltaBias;
deltaBiasSqrt += (1-rho)/rho*newDeltaBias*newDeltaBias;
deltaBiasSqrt *= rho;
return biasUpdate;
}
@Override
public AdaDelta clone()
{
return new AdaDelta(this);
}
@Override
public void setup(int d)
{
gSqrd = new ScaledVector(new DenseVector(d));
deltaXSqrt = new ScaledVector(new DenseVector(d));
deltaBiasSqrt = biasGSqrd = 0;
}
}
| 3,850 | 27.109489 | 168 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/stochastic/AdaGrad.java | package jsat.math.optimization.stochastic;
import jsat.linear.ConstantVector;
import jsat.linear.DenseVector;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
/**
* AdaGrad provides an adaptive learning rate for each individual feature<br>
* <br>
* See: Duchi, J., Hazan, E.,&Singer, Y. (2011). <i>Adaptive Subgradient
* Methods for Online Learning and Stochastic Optimization</i>. Journal of
* Machine Learning Research, 12, 2121–2159.
*
* @author Edward Raff
*/
public class AdaGrad implements GradientUpdater
{
private static final long serialVersionUID = 5138474612999751777L;
private Vec daigG;
private double biasG;
/**
* Creates a new AdaGrad updater
*/
public AdaGrad()
{
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public AdaGrad(AdaGrad toCopy)
{
if(toCopy.daigG != null)
this.daigG = toCopy.daigG.clone();
this.biasG = toCopy.biasG;
}
@Override
public void update(Vec x, Vec grad, double eta)
{
update(x, grad, eta, 0, 0);
}
@Override
public double update(Vec x, Vec grad, double eta, double bias, double biasGrad)
{
for(IndexValue iv : grad)
{
final int indx = iv.getIndex();
final double grad_i = iv.getValue();
final double g_ii = daigG.get(indx);
x.increment(indx, -eta*grad_i/Math.sqrt(g_ii));
daigG.increment(indx, grad_i*grad_i);
}
double biasUpdate = eta*biasGrad/Math.sqrt(biasG);
biasG += biasGrad*biasGrad;
return biasUpdate;
}
@Override
public AdaGrad clone()
{
return new AdaGrad(this);
}
@Override
public void setup(int d)
{
daigG = new DenseVector(new ConstantVector(1.0, d));
biasG = 1;
}
}
| 1,885 | 22.575 | 83 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/stochastic/Adam.java | package jsat.math.optimization.stochastic;
import jsat.linear.DenseVector;
import jsat.linear.IndexValue;
import jsat.linear.ScaledVector;
import jsat.linear.Vec;
import static java.lang.Math.*;
/**
* Adam is inspired by {@link RMSProp} and {@link AdaGrad}, where the former can
* be seen as a special case of Adam. Adam has been shown to work well in
* training neural networks, and still converges well with sparse gradients.<br>
 * NOTE: while it will converge, Adam does not support sparse updates. So
* runtime when in highly sparse environments will be hampered. <br>
* <br>
* See: Kingma, D. P.,&Ba, J. L. (2015). <i>Adam: A Method for Stochastic
* Optimization</i>. In ICLR.
*
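 * <br><br>
 * A minimal usage sketch (illustrative; {@code w}, {@code grad}, and {@code d}
 * are hypothetical and would be supplied by the learner using this updater):
 * <pre>{@code
 * // explicit hyper-parameters, equal to the listed defaults
 * Adam adam = new Adam(0.0002, 0.1, 0.001, 1e-8, 1e-8);
 * adam.setup(d);              // d = dimension of the weight vector
 * adam.update(w, grad, 1.0);  // the given eta further scales the internal alpha
 * }</pre>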
* @author Edward Raff
*/
public class Adam implements GradientUpdater
{
private static final long serialVersionUID = 5352504067435579553L;
//internal state
/**
* 1st moment vector
*/
private Vec m;
/**
* 2nd moment vector
*/
private Vec v;
/**
* time step
*/
private long t;
//parameters of the algo
private double alpha;
private double beta_1;
private double beta_2;
private double eps;
private double lambda;
private double vBias;
private double mBias;
public static final double DEFAULT_ALPHA = 0.0002;
public static final double DEFAULT_BETA_1 = 0.1;
public static final double DEFAULT_BETA_2 = 0.001;
public static final double DEFAULT_EPS = 1e-8;
public static final double DEFAULT_LAMBDA = 1e-8;
public Adam()
{
this(DEFAULT_ALPHA, DEFAULT_BETA_1, DEFAULT_BETA_2, DEFAULT_EPS, DEFAULT_LAMBDA);
}
public Adam(double alpha, double beta_1, double beta_2, double eps, double lambda)
{
if(alpha <= 0 || Double.isInfinite(alpha) || Double.isNaN(alpha))
throw new IllegalArgumentException("alpha must be a positive value, not " + alpha);
if(beta_1 <= 0 || beta_1 > 1 || Double.isInfinite(beta_1) || Double.isNaN(beta_1))
throw new IllegalArgumentException("beta_1 must be in (0, 1], not " + beta_1);
if(beta_2 <= 0 || beta_2 > 1 || Double.isInfinite(beta_2) || Double.isNaN(beta_2))
throw new IllegalArgumentException("beta_2 must be in (0, 1], not " + beta_2);
if(pow(1-beta_1, 2) / sqrt(1-beta_2) >= 1)
throw new IllegalArgumentException("the required property (1-beta_1)^2 / sqrt(1-beta_2) < 1, is not held by beta_1=" + beta_1 + " and beta_2=" + beta_2 );
if(lambda <= 0 || lambda >= 1 || Double.isInfinite(lambda) || Double.isNaN(lambda))
throw new IllegalArgumentException("lambda must be in (0, 1), not " + lambda);
this.alpha = alpha;
this.beta_1 = beta_1;
this.beta_2 = beta_2;
this.eps = eps;
this.lambda = lambda;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public Adam(Adam toCopy)
{
this.alpha = toCopy.alpha;
this.beta_1 = toCopy.beta_1;
this.beta_2 = toCopy.beta_2;
this.eps = toCopy.eps;
this.lambda = toCopy.lambda;
this.t = toCopy.t;
this.mBias = toCopy.mBias;
this.vBias = toCopy.vBias;
if(toCopy.m != null)
{
this.m = toCopy.m.clone();
this.v = toCopy.v.clone();
}
}
@Override
public void update(Vec x, Vec grad, double eta)
{
update(x, grad, eta, 0, 0);
}
@Override
public double update(Vec x, Vec grad, double eta, double bias, double biasGrad)
{
t++;
//(Decay the first moment running average coefficient
double beta_1t = 1 - (1-beta_1)*pow(lambda, t-1);
//(Get gradients w.r.t. stochastic objective at timestep t)
//grad is already that value
//(Update biased first moment estimate)
m.mutableMultiply(1-beta_1t);
m.mutableAdd(beta_1t, grad);
        mBias = (1-beta_1t)*mBias + beta_1t*biasGrad;
//(Update biased second raw moment estimate)
v.mutableMultiply(1-beta_2);
vBias = (1-beta_2)*vBias + beta_2 *biasGrad *biasGrad;
for(final IndexValue iv : grad)
{
final double g_i = iv.getValue();
v.increment(iv.getIndex(), beta_2*(g_i*g_i));
}
/*
* "Note that the efficiency of algorithm 1 can, at the expense of
* clarity, be improved upon by changing the order of computation, e.g.
* by replacing the last three lines in the loop with the following
* line:"
* θ_t = θ_{t−1} −[α ·√(1−(1−β_2)^t) · (1−(1−β_1)^t)−1] ·m_t/√v_t
*/
double cnst = eta*alpha*sqrt(1-pow((1-beta_2), t))/(1-pow((1-beta_1), t));
//while the algorithm may converge well with sparse data, m and v are likely to all be non-zero after observing lots of data.
for(int i = 0; i < m.length(); i++)
x.increment(i, -cnst * m.get(i)/(sqrt(v.get(i))+eps));
return cnst * mBias/(sqrt(vBias)+eps);
}
@Override
public Adam clone()
{
return new Adam(this);
}
@Override
public void setup(int d)
{
t = 0;
m = new ScaledVector(new DenseVector(d));
v = new ScaledVector(new DenseVector(d));
vBias = mBias = 0;
}
}
| 5,356 | 32.48125 | 166 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/stochastic/GradientUpdater.java | package jsat.math.optimization.stochastic;
import java.io.Serializable;
import jsat.linear.Vec;
/**
* This interface defines the method of updating some weight vector using a
* gradient and a learning rate. The method may then apply its own set of
* learning rates on top of the given learning rate in order to accelerate
* convergence in general or for specific conditions / methods.
*
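 * <br><br>
 * A minimal usage sketch (illustrative; the gradient values are made up):
 * <pre>{@code
 * GradientUpdater gu = new SimpleSGD();
 * gu.setup(3);                   // weight vectors will have 3 dimensions
 * Vec w = new DenseVector(3);    // the weights being learned
 * Vec grad = new DenseVector(3); // gradient of the loss w.r.t. w
 * grad.set(0, 0.5);
 * gu.update(w, grad, 0.1);       // w is mutated in place: w = w - 0.1 * f(grad)
 * }</pre>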
* @author Edward Raff
*/
public interface GradientUpdater extends Serializable
{
/**
* Updates the weight vector {@code x} such that <i> x = x-ηf(grad)</i>,
* where f(grad) is some function on the gradient that effectively returns a
* new vector. It is not necessary for the internal implementation to ever
* explicitly form any of these objects, so long as {@code x} is mutated to
* have the correct result.
     * @param w the vector to mutate such that it has been updated by the
* gradient
* @param grad the gradient to update the weight vector {@code x} from
* @param eta the learning rate to apply
*/
public void update(Vec w, Vec grad, double eta);
/**
* Updates the weight vector {@code x} such that <i> x = x-ηf(grad)</i>,
* where f(grad) is some function on the gradient that effectively returns a
* new vector. It is not necessary for the internal implementation to ever
* explicitly form any of these objects, so long as {@code x} is mutated to
* have the correct result. <br>
* <br>
* This version of the update method includes two extra parameters to make
     * it easier to use when a scalar bias term is also used
*
     * @param w the vector to mutate such that it has been updated by the
* gradient
* @param grad the gradient to update the weight vector {@code x} from
* @param eta the learning rate to apply
* @param bias the bias term of the vector
* @param biasGrad the gradient for the bias term
* @return the value to change the bias by, the update being
* {@code bias = bias - returnValue}
*/
public double update(Vec w, Vec grad, double eta, double bias, double biasGrad);
/**
* Sets up this updater to update a weight vector of dimension {@code d}
* by a gradient of the same dimension
* @param d the dimension of the weight vector that will be updated
*/
public void setup(int d);
public GradientUpdater clone();
}
| 2,451 | 40.559322 | 84 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/stochastic/NAdaGrad.java | /*
* Copyright (C) 2016 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.math.optimization.stochastic;
import java.util.Arrays;
import jsat.linear.*;
/**
* Normalized AdaGrad provides an adaptive learning rate for each individual
* feature, and is mostly scale invariant to the data distribution. NAdaGrad is
* meant for online stochastic learning where the update is obtained from one
 * datum at a time, and it relies on the gradient being a scalar multiplication
 * of the training data. If the gradient given is a {@link ScaledVector}, where
 * the base vector is the datum, then NAdaGrad will work. If that is not the case,
* NAdaGrad will degenerate into something similar to normal {@link AdaGrad}.
* <br><br>
* The current implementation assumes that the bias term is always scaled
* correctly, and does normal AdaGrad on it.
* <br>
* See: Ross, S., Mineiro, P., & Langford, J. (2013). Normalized online
* learning. In Twenty-Ninth Conference on Uncertainty in Artificial
* Intelligence. Retrieved from
* <a href="http://arxiv.org/abs/1305.6646">here</a>
*
* @author Edward Raff
*/
public class NAdaGrad implements GradientUpdater
{
private static final long serialVersionUID = 5138675613579751777L;
private double[] G;
private double[] S;
private double N;
private double biasG;
private long t;
/**
* Creates a new NAdaGrad updater
*/
public NAdaGrad()
{
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public NAdaGrad(NAdaGrad toCopy)
{
if(toCopy.G != null)
this.G = Arrays.copyOf(toCopy.G, toCopy.G.length);
if(toCopy.S != null)
this.S = Arrays.copyOf(toCopy.S, toCopy.S.length);
this.biasG = toCopy.biasG;
this.N = toCopy.N;
this.t = toCopy.t;
}
@Override
public void update(Vec w, Vec grad, double eta)
{
update(w, grad, eta, 0, 0);
}
@Override
public double update(Vec w, Vec grad, double eta, double bias, double biasGrad)
{
if(grad instanceof ScaledVector)
{
t++;
            //decompose our gradient back into its parts, the multiplier and the raw datum
Vec x = ((ScaledVector)grad).getBase();
for(IndexValue iv : x)
{
final int indx = iv.getIndex();
final double abs_x_i = Math.abs(iv.getValue());
if(abs_x_i > S[indx])//(a)
{
w.set(indx, (w.get(indx)*S[indx])/abs_x_i);
S[indx] = abs_x_i;
}
//skip step (b) for simplicity since grad was already given to us
//(c)
N += abs_x_i*abs_x_i/(S[indx]*S[indx]);
}
double eta_roled = -eta*Math.sqrt(t/(N+1e-6));
for(IndexValue iv : grad)
{
final int indx = iv.getIndex();
final double grad_i = iv.getValue();
G[indx] += grad_i*grad_i;
final double g_ii = G[indx];
w.increment(indx, eta_roled*grad_i/(S[indx]*Math.sqrt(g_ii)));
}
double biasUpdate = eta*biasGrad/Math.sqrt(biasG);
biasG += biasGrad*biasGrad;
return biasUpdate;
}
else//lets degenerate into something at least similar to AdaGrad
{
double eta_roled = -eta*Math.sqrt((t+1)/Math.max(N, t+1));
for(IndexValue iv : grad)
{
final int indx = iv.getIndex();
final double grad_i = iv.getValue();
G[indx] += grad_i*grad_i;
final double g_ii = G[indx];
w.increment(indx, eta_roled*grad_i/(Math.max(S[indx], 1.0)*Math.sqrt(g_ii)));
}
double biasUpdate = eta*biasGrad/Math.sqrt(biasG);
biasG += biasGrad*biasGrad;
return biasUpdate;
}
}
@Override
public NAdaGrad clone()
{
return new NAdaGrad(this);
}
@Override
public void setup(int d)
{
G = new double[d];
S = new double[d];
biasG = 1;
t = 0;
}
}
| 4,952 | 30.547771 | 93 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/stochastic/RMSProp.java | package jsat.math.optimization.stochastic;
import jsat.linear.DenseVector;
import jsat.linear.IndexValue;
import jsat.linear.ScaledVector;
import jsat.linear.Vec;
/**
 * RMSProp is an adaptive learning rate scheme proposed by Geoffrey Hinton.
* Provides an adaptive learning rate for each individual feature
*
* @author Edward Raff
*/
public class RMSProp implements GradientUpdater
{
private static final long serialVersionUID = 3512851084092042727L;
private double rho;
private Vec daigG;
private double biasG;
/**
* Creates a new RMSProp updater that uses a decay rate of 0.9
*/
public RMSProp()
{
this(0.9);
}
/**
* Creates a new RMSProp updater
* @param rho the decay rate to use
*/
public RMSProp(double rho)
{
setRho(rho);
}
/**
* Sets the decay rate used by rmsprop. Lower values focus more on the
* current gradient, where higher values incorporate a longer history.
*
* @param rho the decay rate in (0, 1) to use
*/
public void setRho(double rho)
{
if(rho <= 0 || rho >= 1 || Double.isNaN(rho))
throw new IllegalArgumentException("Rho should be a value in (0, 1) not " + rho);
this.rho = rho;
}
/**
*
* @return the decay rate parameter to use
*/
public double getRho()
{
return rho;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public RMSProp(RMSProp toCopy)
{
if(toCopy.daigG != null)
this.daigG = toCopy.daigG.clone();
this.rho = toCopy.rho;
this.biasG = toCopy.biasG;
}
@Override
public void update(Vec x, Vec grad, double eta)
{
update(x, grad, eta, 0, 0);
}
@Override
public double update(Vec x, Vec grad, double eta, double bias, double biasGrad)
{
daigG.mutableMultiply(rho);
for(IndexValue iv : grad)
{
final int indx = iv.getIndex();
final double grad_i = iv.getValue();
daigG.increment(indx, (1-rho)*grad_i*grad_i);
double g_iiRoot = Math.max(Math.sqrt(daigG.get(indx)), Math.abs(grad_i));//tiny grad sqrd could result in zero
x.increment(indx, -eta*grad_i/g_iiRoot);
}
biasG *= rho;
biasG += (1-rho)*biasGrad*biasGrad;
double g_iiRoot = Math.max(Math.sqrt(biasG), Math.abs(biasGrad));//tiny grad sqrd could result in zero
return eta*biasGrad/g_iiRoot;
}
@Override
public RMSProp clone()
{
return new RMSProp(this);
}
@Override
public void setup(int d)
{
daigG = new ScaledVector(new DenseVector(d));
biasG = 0;
}
}
| 2,779 | 23.60177 | 122 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/stochastic/Rprop.java | /*
* Copyright (C) 2015 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.math.optimization.stochastic;
import java.util.Arrays;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
/**
* The Rprop algorithm provides adaptive learning rates using only first order
* information. Rprop works best with the true gradient, and may not work well
* when using stochastic gradients.<br>
* <br>
* See: Riedmiller, M., & Braun, H. (1993). <i>A direct adaptive method for
* faster backpropagation learning: the RPROP algorithm</i>. IEEE International
* Conference on Neural Networks, 1(3), 586–591. doi:10.1109/ICNN.1993.298623
*
* @author Edward Raff
*/
public class Rprop implements GradientUpdater
{
private double eta_pos = 1.2;
private double eta_neg = 0.5;
private double eta_start = 0.1;
private double eta_max = 50;
private double eta_min = 1e-6;
/**
* holds what would be w^(t-1)
*/
private double[] prev_w;
private double[] prev_grad;
private double[] cur_eta;
private double prev_grad_bias;
private double cur_eta_bias;
private double prev_bias;
/**
* Creates a new Rprop instance for gradient updating
*/
public Rprop()
{
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public Rprop(Rprop toCopy)
{
if (toCopy.prev_grad != null)
this.prev_grad = Arrays.copyOf(toCopy.prev_grad, toCopy.prev_grad.length);
if (toCopy.cur_eta != null)
this.cur_eta = Arrays.copyOf(toCopy.cur_eta, toCopy.cur_eta.length);
if (toCopy.prev_w != null)
this.prev_w = Arrays.copyOf(toCopy.prev_w, toCopy.prev_w.length);
this.prev_grad_bias = toCopy.prev_grad_bias;
this.cur_eta_bias = toCopy.cur_eta_bias;
this.prev_bias = toCopy.prev_bias;
}
@Override
public void update(Vec w, Vec grad, double eta)
{
update(w, grad, eta, 0, 0);
}
@Override
public double update(Vec w, Vec grad, double eta, double bias, double biasGrad)
{
for(IndexValue iv : grad)
{
final int i = iv.getIndex();
final double g_i = iv.getValue();
final double g_prev = prev_grad[i];
final double w_i = w.get(i);
prev_grad[i] = g_i;
final double sign_g_i = Math.signum(g_i);
final double sign_g_prev = Math.signum(g_prev);
if(sign_g_i == 0 || sign_g_prev == 0)
{
double eta_i = cur_eta[i];
w.increment(i, -sign_g_i*eta_i*eta);
}
else if(sign_g_i == sign_g_prev)
{
double eta_i = cur_eta[i] = Math.min(cur_eta[i]*eta_pos, eta_max);
w.increment(i, -sign_g_i*eta_i*eta);
}
else//not equal, sign change
{
double eta_i = cur_eta[i] = Math.max(cur_eta[i]*eta_neg, eta_min);
w.increment(i, -prev_w[i]*eta_i*eta);
prev_grad[i] = 0;
}
prev_w[i] = w_i;
}
//and again for the bias term
if(bias != 0 && biasGrad != 0)
{
double toRet;
final double g_i = biasGrad;
final double g_prev = prev_grad_bias;
final double w_i = bias;
prev_grad_bias = g_i;
final double sign_g_i = Math.signum(g_i);
final double sign_g_prev = Math.signum(g_prev);
if(sign_g_i == 0 || sign_g_prev == 0)
{
double eta_i = cur_eta_bias;
toRet = sign_g_i*eta_i;
}
else if(sign_g_i == sign_g_prev)
{
double eta_i = cur_eta_bias = Math.min(cur_eta_bias*eta_pos, eta_max);
toRet = sign_g_i*eta_i;
}
else//not equal, sign change
{
double eta_i = cur_eta_bias = Math.max(cur_eta_bias*eta_neg, eta_min);
prev_grad_bias = 0;
toRet = -prev_bias*eta_i;
}
prev_bias = w_i;
return toRet*eta;
}
return 0;
}
@Override
public void setup(int d)
{
prev_grad = new double[d];
cur_eta = new double[d];
Arrays.fill(cur_eta, eta_start);
prev_w = new double[d];
cur_eta_bias = eta_start;
prev_grad_bias = 0;
prev_bias = 0;
}
@Override
public Rprop clone()
{
return new Rprop(this);
}
}
| 5,309 | 29.170455 | 86 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/stochastic/SGDMomentum.java | package jsat.math.optimization.stochastic;
import jsat.linear.DenseVector;
import jsat.linear.ScaledVector;
import jsat.linear.Vec;
/**
 * Performs unaltered Stochastic Gradient Descent updates using either standard
 * or Nesterov momentum. <br>
* <br>
* See:<br>
* <ul>
* <li>Bengio, Y., Boulanger-Lewandowski, N.,&Pascanu, R. (2013). <i>Advances
* in optimizing recurrent networks</i>. In 2013 IEEE International Conference
* on Acoustics, Speech and Signal Processing (pp. 8624–8628). IEEE.
* doi:10.1109/ICASSP.2013.6639349</li>
* <li>Sutskever, I., Martens, J., Dahl, G.,&Hinton, G. (2013). <i>On the
* importance of initialization and momentum in deep learning</i>. JMLR W&CP,
* 28, 1139–1147.</li>
* </ul>
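 * <br>
 * A minimal usage sketch (illustrative; {@code d} is the hypothetical weight dimension):
 * <pre>{@code
 * SGDMomentum classic = new SGDMomentum(0.9, false); // classical momentum
 * SGDMomentum nesterov = new SGDMomentum(0.9);       // Nesterov momentum (the default)
 * nesterov.setup(d);                                 // must be called before updates
 * }</pre>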
* @author Edward Raff
*/
public class SGDMomentum implements GradientUpdater
{
private static final long serialVersionUID = -3837883539010356899L;
private double momentum;
private boolean nestrov;
private Vec velocity;
private double biasVelocity;
/**
* Creates a new SGD with Momentum learner
* @param momentum the amount of momentum to use
     * @param nestrov {@code true} to use Nesterov momentum, {@code false} for
* standard.
*/
public SGDMomentum(double momentum, boolean nestrov)
{
setMomentum(momentum);
this.nestrov = nestrov;
}
/**
     * Creates a new SGD with Nesterov Momentum learner
* @param momentum the amount of momentum to use
*/
public SGDMomentum(double momentum)
{
this(momentum, true);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public SGDMomentum(SGDMomentum toCopy)
{
this.momentum = toCopy.momentum;
if(toCopy.velocity != null)
this.velocity = toCopy.velocity.clone();
this.biasVelocity = toCopy.biasVelocity;
}
/**
* Sets the momentum for accumulating gradients.
* @param momentum the momentum buildup term in (0, 1)
*/
public void setMomentum(double momentum)
{
if(momentum <= 0 || momentum >= 1 || Double.isNaN(momentum))
throw new IllegalArgumentException("Momentum must be in (0,1) not " + momentum);
this.momentum = momentum;
}
/**
*
* @return the momentum buildup term
*/
public double getMomentum()
{
return momentum;
}
@Override
public void update(Vec x, Vec grad, double eta)
{
update(x, grad, eta, 0.0, 0.0);
}
@Override
public double update(Vec x, Vec grad, double eta, double bias, double biasGrad)
{
double biasUpdate;
if (nestrov)
{
//update
x.mutableAdd(momentum * momentum, velocity);
x.mutableSubtract((1 + momentum) * eta, grad);
biasUpdate = -momentum*momentum*biasVelocity + (1+momentum)*eta*biasGrad;
}
        else//classic momentum
{
//update
x.mutableAdd(momentum, velocity);
x.mutableSubtract(eta, grad);
biasUpdate = -momentum*biasVelocity + eta*biasGrad;
}
//velocity
velocity.mutableMultiply(momentum);
velocity.mutableSubtract(eta, grad);
biasVelocity = biasVelocity*momentum - eta*biasGrad;
return biasUpdate;
}
@Override
public SGDMomentum clone()
{
return new SGDMomentum(this);
}
@Override
public void setup(int d)
{
velocity = new ScaledVector(new DenseVector(d));
biasVelocity = 0;
}
}
| 3,603 | 26.097744 | 92 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/optimization/stochastic/SimpleSGD.java | package jsat.math.optimization.stochastic;
import jsat.linear.Vec;
/**
 * Performs unaltered Stochastic Gradient Descent updates computing
* <i>x = x- η grad</i><br>
* <br>
* Because the SimpleSGD requires no internal state, it is not necessary to call
* {@link #setup(int) }.
*
* @author Edward Raff
*/
public class SimpleSGD implements GradientUpdater
{
private static final long serialVersionUID = 4022442467298319553L;
/**
* Creates a new SGD updater
*/
public SimpleSGD()
{
}
@Override
public void update(Vec x, Vec grad, double eta)
{
x.mutableSubtract(eta, grad);
}
@Override
public double update(Vec x, Vec grad, double eta, double bias, double biasGrad)
{
x.mutableSubtract(eta, grad);
return eta*biasGrad;
}
@Override
public SimpleSGD clone()
{
return new SimpleSGD();
}
@Override
public void setup(int d)
{
//no setup to be done
}
}
| 1,000 | 17.886792 | 83 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/rootfinding/Bisection.java |
package jsat.math.rootfinding;
import jsat.math.Function1D;
/**
* Provides an implementation of the Bisection method of root finding.
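 * <br><br>
 * A minimal usage sketch (illustrative; assumes {@link Function1D} can be
 * supplied as a lambda):
 * <pre>{@code
 * // f(1) < 0 and f(3) > 0, so [1, 3] brackets the root at x = 2
 * double r = Bisection.root(1.0, 3.0, x -> x*x*x - 8);
 * }</pre>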
* @author Edward Raff
*/
public class Bisection implements RootFinder
{
private static final long serialVersionUID = -8107160048637997385L;
/**
* Performs root finding on the function {@code f}.
*
* @param a the left bound on the root (i.e., f(a) < 0)
* @param b the right bound on the root (i.e., f(b) > 0)
* @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double a, double b, Function1D f)
{
return root(1e-15, a, b, f);
}
/**
* Performs root finding on the function {@code f}.
*
* @param eps the desired accuracy of the result
* @param a the left bound on the root (i.e., f(a) < 0)
* @param b the right bound on the root (i.e., f(b) > 0)
* @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double eps, double a, double b, Function1D f)
{
return root(eps, 1000, a, b, f);
}
/**
* Performs root finding on the function {@code f}.
*
* @param eps the desired accuracy of the result
* @param maxIterations the maximum number of iterations to perform
* @param a the left bound on the root (i.e., f(a) < 0)
* @param b the right bound on the root (i.e., f(b) > 0)
* @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double eps, int maxIterations, double a, double b, Function1D f)
{
if(b <= a)
            throw new ArithmeticException("a must be < b for Bisection to work");
double fb = f.f(b);
double fa = f.f(a);
if(fa* fb >= 0)
throw new ArithmeticException("The given interval does not appear to bracket the root");
while(b - a > 2*eps && maxIterations-- > 0)
{
double midPoint = (a+b)*0.5;
double ftmp = f.f(midPoint);
if(fa*ftmp < 0)
{
b = midPoint;
fb = ftmp;
}
else if(fb * ftmp < 0)
{
a = midPoint;
fa = ftmp;
}
else
break;//We converged
}
return (a+b)*0.5;
}
@Override
public double root(double eps, int maxIterations, double[] initialGuesses, Function1D f)
{
return root(eps, maxIterations, initialGuesses[0], initialGuesses[1], f);
}
@Override
public int guessesNeeded()
{
return 2;
}
}
| 2,942 | 28.138614 | 100 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/rootfinding/RiddersMethod.java |
package jsat.math.rootfinding;
import static java.lang.Math.*;
import jsat.math.Function1D;
/**
* Provides an implementation of Ridder's method for root finding.
* @author Edward Raff
*/
public class RiddersMethod implements RootFinder
{
private static final long serialVersionUID = 8154909945080099018L;
/**
* Performs root finding on the function {@code f}.
*
* @param a the left bound on the root (i.e., f(a) < 0)
* @param b the right bound on the root (i.e., f(b) > 0)
* @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double a, double b, Function1D f)
{
return root(1e-15, a, b, f);
}
/**
* Performs root finding on the function {@code f}.
*
* @param eps the desired accuracy of the result
* @param a the left bound on the root (i.e., f(a) < 0)
* @param b the right bound on the root (i.e., f(b) > 0)
* @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double eps, double a, double b, Function1D f)
{
return root(eps, 1000, a, b, f);
}
/**
* Performs root finding on the function {@code f}.
*
* @param eps the desired accuracy of the result
* @param maxIterations the maximum number of iterations to perform
* @param a the left bound on the root (i.e., f(a) < 0)
* @param b the right bound on the root (i.e., f(b) > 0)
* @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double eps, int maxIterations, double a, double b, Function1D f)
{
double x1 = a;
double x2 = b;
double fx1 = f.f(x1);
double fx2 = f.f(x2);
double halfEps = eps*0.5;
if(fx1* fx2 >= 0)
throw new ArithmeticException("The given interval does not appear to bracket the root");
        double dif = 1;//Measures the change in the interval endpoints
while( abs(x1-x2) > eps && maxIterations-->0)
{
double x3 = (x1+x2)*0.5;
double fx3 = f.f(x3);
double x4 = x3+(x3-x1)*signum(fx1-fx2)*fx3/sqrt(fx3*fx3-fx1*fx2);
double fx4 = f.f(x4);
if(fx3 * fx4 < 0)
{
x1 = x3;
fx1 = fx3;
x2 = x4;
fx2 = fx4;
}
else if(fx1 * fx4 < 0)
{
dif = abs(x4 - x2);
if(dif <= halfEps)//WE are no longer updating, return the value
return x4;
x2 = x4;
fx2 = fx4;
}
else
{
dif = abs(x4 - x1);
if(dif <= halfEps)//WE are no longer updating, return the value
return x4;
x1 = x4;
fx1 = fx4;
}
}
return x2;
}
@Override
public double root(double eps, int maxIterations, double[] initialGuesses, Function1D f)
{
return root(eps, maxIterations, initialGuesses[0], initialGuesses[1], f);
}
@Override
public int guessesNeeded()
{
return 2;
}
}
| 3,529 | 28.416667 | 100 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/rootfinding/RootFinder.java |
package jsat.math.rootfinding;
import java.io.Serializable;
import jsat.math.Function1D;
/**
* This interface defines a general contract for the numerical computation of a
* root of a given function. A root of a function {@code f} is a point {@code x}
* for which {@code f(x) = 0}. A function may have any number of roots
* (including no roots).
*
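 * <br><br>
 * A minimal usage sketch (illustrative; assumes {@link Function1D} can be
 * supplied as a lambda):
 * <pre>{@code
 * RootFinder rf = new Zeroin();
 * // the two initial guesses bracket sqrt(2), the root of x^2 - 2
 * double r = rf.root(1e-10, 1000, new double[]{0, 2}, x -> x*x - 2);
 * }</pre>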
* @author Edward Raff
*/
public interface RootFinder extends Serializable
{
/**
     * Attempts to numerically compute a root of the given function, i.e., a value {@code x} such that f(x) = 0.
     *
     * @param eps the accuracy desired for the solution
     * @param maxIterations the maximum number of steps allowed before forcing a return of the current solution.
     * @param initialGuesses an array containing the initial guess values
     * @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public double root(double eps, int maxIterations, double[] initialGuesses, Function1D f);
/**
* Different root finding methods require different numbers of initial guesses.
* Some root finding methods require 2 guesses, each with values of opposite
* sign so that they bracket the root. Others just need any 2 initial guesses
* sufficiently close to the root. This method simply returns the number of
* guesses that are needed.
*
* @return the number of initial guesses this root finding method needs
*/
public int guessesNeeded();
}
| 1,783 | 41.47619 | 141 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/rootfinding/Secant.java |
package jsat.math.rootfinding;
import jsat.math.Function1D;
/**
* This class provides an implementation of the Secant method of finding roots
* of functions.
*
* @author Edward Raff
*/
public class Secant implements RootFinder
{
private static final long serialVersionUID = -5175113107084930582L;
/**
* Performs root finding on the function {@code f}.
*
     * @param a the first initial guess for the root
     * @param b the second initial guess for the root
     * @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double a, double b, Function1D f)
{
return root(1e-15, a, b, f);
}
/**
* Performs root finding on the function {@code f}.
*
* @param eps the desired accuracy of the result
     * @param a the first initial guess for the root
     * @param b the second initial guess for the root
     * @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double eps, double a, double b, Function1D f)
{
return root(eps, 1000, a, b, f);
}
/**
* Performs root finding on the function {@code f}.
*
* @param eps the desired accuracy of the result
* @param maxIterations the maximum number of iterations to perform
     * @param a the first initial guess for the root
     * @param b the second initial guess for the root
     * @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double eps, int maxIterations, double a, double b, Function1D f)
{
double x0 = a;
double x1 = b;
/**
* f(x0)
*/
double fx0 = f.f(x0);
while(Math.abs(x1-x0) > 2*eps && maxIterations-- > 0)
{
double fx1 = f.f(x1);
double nextX = x1 - fx1*(x1-x0)/(fx1-fx0);
x0 = x1;
fx0 = fx1;
x1 = nextX;
}
return x1;
}
@Override
public double root(double eps, int maxIterations, double[] initialGuesses, Function1D f)
{
return root(eps, maxIterations, initialGuesses[0], initialGuesses[1], f);
}
@Override
public int guessesNeeded()
{
return 2;
}
}
| 2,585 | 26.806452 | 94 | java |
JSAT | JSAT-master/JSAT/src/jsat/math/rootfinding/Zeroin.java |
package jsat.math.rootfinding;
import static java.lang.Math.*;
import jsat.math.Function1D;
/**
* This class provides an implementation of the popular ZeroIn root finder.
*
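 * <br><br>
 * A minimal usage sketch (illustrative; assumes {@link Function1D} can be
 * supplied as a lambda):
 * <pre>{@code
 * // solve cos(x) = x on [0, 1]; the result is approximately 0.739085
 * double r = Zeroin.root(0.0, 1.0, x -> Math.cos(x) - x);
 * }</pre>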
* @author Edward Raff
*/
public class Zeroin implements RootFinder
{
private static final long serialVersionUID = -8359510619103768778L;
/**
* Performs root finding on the function {@code f}.
*
* @param a the left bound on the root (i.e., f(a) < 0)
* @param b the right bound on the root (i.e., f(b) > 0)
* @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double a, double b, Function1D f)
{
return root(1e-15, 1000, a, b, f);
}
/**
* Performs root finding on the function {@code f}.
*
* @param eps the desired accuracy of the result
* @param a the left bound on the root (i.e., f(a) < 0)
* @param b the right bound on the root (i.e., f(b) > 0)
* @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double eps, double a, double b, Function1D f)
{
return root(eps, 1000, a, b, f);
}
/**
* Performs root finding on the function {@code f}.
*
* @param eps the desired accuracy of the result
* @param maxIterations the maximum number of iterations to perform
* @param a the left bound on the root (i.e., f(a) < 0)
* @param b the right bound on the root (i.e., f(b) > 0)
* @param f the function to find the root of
     * @return the approximate value of {@code x} for which {@code f(x) = 0}
*/
public static double root(double eps, int maxIterations, double a, double b, Function1D f)
{
/*
         * Code has few comments; it follows the algorithm description at http://en.wikipedia.org/wiki/Brent%27s_method#Algorithm ,
         * which is in turn taken from Brent's book
*
*/
double fa = f.f(a);
double fb = f.f(b);
if(abs(fa-0) < 2*eps)
return a;
if(abs(fb-0) < 2*eps)
return b;
if(fa * fb >= 0)
throw new ArithmeticException("The given search interval does not appear to contain the root ");
if(abs(fa) < abs(fb)) //swap
{
double tmp = a;
a = b;
b = tmp;
tmp = fa;
fa = fb;
fb = tmp;
}
double c = a;
double fc = fa;
boolean mflag = true;
double s;
        double d = 0;//initial value doesn't matter, d is overwritten each iteration before it is used
double fs;
do
{
if(fa != fc && fb != fc)//inverse quadratic interpolation
{
s = a*fb*fc/( (fa-fb)*(fa-fc) ) + b*fa*fc/( (fb-fa)*(fb-fc) ) + c*fa*fb/( (fc-fa)*(fc-fb) );
}
else//secant rule
{
s = b - fb*(b-a)/(fb-fa);
}
            //Determine whether or not we must use bisection
boolean cond1 = (s - ( 3 * a + b) / 4 ) * ( s - b) >= 0;
boolean cond2 = mflag && (abs(s - b) >= (abs(b - c) / 2));
boolean cond3 = !mflag && (abs(s - b) >= (abs(c - d) / 2));
boolean cond4 = mflag && (abs(b-c) < 2*eps);
boolean cond5 = !mflag && abs(c-d) < 2*eps;
if(cond1 || cond2 || cond3 || cond4 || cond5)//Bisection must be used
{
s = (a+b)/2;
mflag = true;
}
else
mflag = false;
fs = f.f(s);
d = c;
c = b;
//adjust the interval accordingly
if(fa*fs < 0)
{
b = s;
fb = fs;
}
else
{
a = s;
fa = fs;
}
if(abs(fa) < abs(fb))//swap
{
double tmp = a;
a = b;
b = tmp;
tmp = fa;
fa = fb;
fb = tmp;
}
}
while( fb != 0.0 && fs != 0.0 && abs(b-a) > 2*eps && maxIterations-- > 0);
return b;
}
@Override
public double root(double eps, int maxIterations, double[] initialGuesses, Function1D f)
{
return root(eps, maxIterations, initialGuesses[0], initialGuesses[1], f);
}
@Override
public int guessesNeeded()
{
return 2;
}
}
| 4,911 | 27.725146 | 124 | java |
JSAT | JSAT-master/JSAT/src/jsat/outlier/DensityOutlier.java | /*
* Copyright (C) 2018 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.outlier;
import java.util.Arrays;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.distributions.multivariate.MultivariateDistribution;
import jsat.distributions.multivariate.NormalMR;
import jsat.utils.concurrent.ParallelUtils;
/**
* This class provides an outlier detector based upon density estimation.
*
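 * <br><br>
 * A minimal usage sketch (illustrative; {@code trainingSet} and {@code somePoint}
 * are hypothetical):
 * <pre>{@code
 * DensityOutlier detector = new DensityOutlier(0.05); // treat ~5% of the data as outliers
 * detector.fit(trainingSet, false);
 * double s = detector.score(somePoint);               // s <= 0 indicates an outlier
 * }</pre>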
* @author Edward Raff
*/
public class DensityOutlier implements Outlier
{
private double outlierFraction;
private MultivariateDistribution density;
/**
* Threshold for what counts as an outlier. Any value less than or equal to
* this threshold will be considered an outlier.
*/
private double threshold;
public DensityOutlier()
{
this(0.05);
}
public DensityOutlier(double outlierFraction)
{
this(outlierFraction, new NormalMR());
}
public DensityOutlier(double outlierFraction, MultivariateDistribution density)
{
this.outlierFraction = outlierFraction;
this.density = density;
}
public DensityOutlier(DensityOutlier toCopy)
{
this(toCopy.outlierFraction, toCopy.density.clone());
this.threshold = toCopy.threshold;
}
public void setOutlierFraction(double outlierFraction)
{
this.outlierFraction = outlierFraction;
}
public double getOutlierFraction()
{
return outlierFraction;
}
public void setDensityDistribution(MultivariateDistribution density)
{
this.density = density;
}
public MultivariateDistribution getDensityDistribution()
{
return density;
}
@Override
public void fit(DataSet d, boolean parallel)
{
density.setUsingData(d, parallel);
double[] scores = new double[d.size()];
ParallelUtils.run(parallel, scores.length, (start, end)->
{
for(int i = start; i < end; i++)
scores[i] = density.logPdf(d.getDataPoint(i).getNumericalValues());
});
Arrays.sort(scores);
threshold = scores[(int)(scores.length*outlierFraction)];
}
@Override
public double score(DataPoint x)
{
double logPDF = density.logPdf(x.getNumericalValues());
return logPDF - threshold;
}
@Override
protected DensityOutlier clone() throws CloneNotSupportedException
{
return new DensityOutlier(this);
}
}
| 3,120 | 26.619469 | 83 | java |
JSAT | JSAT-master/JSAT/src/jsat/outlier/IsolationForest.java | /*
* Copyright (C) 2018 edraff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.outlier;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
import jsat.math.FastMath;
import jsat.math.SpecialMath;
import jsat.utils.IntList;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
*
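 * Implementation of the Isolation Forest algorithm for outlier detection, which
 * scores points by how quickly random axis-aligned splits isolate them from the
 * rest of the data.
 * <br><br>
 * A minimal usage sketch (illustrative; {@code trainingSet} and {@code somePoint}
 * are hypothetical):
 * <pre>{@code
 * IsolationForest forest = new IsolationForest();
 * forest.fit(trainingSet, true);       // build the random trees, in parallel
 * double s = forest.score(somePoint);  // negative values indicate likely outliers
 * }</pre>
 *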
* @author edraff
*/
public class IsolationForest implements Outlier
{
private int trees = 100;
private double subSamplingSize = 256;
List<iTreeNode> roots = new ArrayList<>();
public IsolationForest()
{
}
public IsolationForest(IsolationForest toCopy)
{
this.trees = toCopy.trees;
this.subSamplingSize = toCopy.subSamplingSize;
this.roots = new ArrayList<>();
for(iTreeNode root : toCopy.roots)
this.roots.add(root.clone());
}
/**
* Equation 1 in the Isolation Forest paper
     * @param n the number of points used to build a tree (the sub-sample size)
     * @return the average path length of an unsuccessful search in a binary search tree of n points
*/
private static double c(double n)
{
return 2*SpecialMath.harmonic(n-1)-(2*(n-1)/n);
}
@Override
public void fit(DataSet d, boolean parallel)
{
for(int i =0; i < trees; i++)
roots.add(new iTreeNode());
int l = (int) Math.ceil(Math.log(subSamplingSize)/Math.log(2));
int D = d.getNumNumericalVars();
//Build all the trees
ParallelUtils.streamP(roots.stream(), parallel).forEach(r->
{
r.build(0, l, d, IntList.range(d.size()), new double[D], new double[D]);
});
}
@Override
public double score(DataPoint x)
{
double e_h_x = roots.stream().
mapToDouble(r->r.pathLength(x.getNumericalValues(), 0))
.average().getAsDouble();
//an anomaly score is produced by computing s(x,ψ) in Equation 2
double anomScore = FastMath.pow2(-e_h_x/c(subSamplingSize));
//anomScore will be in the range [0, 1]
        //values > 0.5 are considered anomalies
        //so just return 0.5-anomScore to fit the interface definition (negative = outlier)
return 0.5-anomScore;
}
@Override
protected IsolationForest clone() throws CloneNotSupportedException
{
return new IsolationForest(this);
}
private class iTreeNode implements Serializable
{
iTreeNode leftChild;
iTreeNode rightChild;
double size = 0;
double splitVal;
int splitAtt;
public iTreeNode()
{
}
public iTreeNode(iTreeNode toCopy)
{
this.leftChild = new iTreeNode(toCopy.leftChild);
this.rightChild = new iTreeNode(toCopy.rightChild);
this.splitVal = toCopy.splitVal;
this.splitAtt = toCopy.splitAtt;
}
@Override
protected iTreeNode clone()
{
return new iTreeNode(this);
}
public void build(int e, int l, DataSet source, IntList X, double[] minVals, double[] maxVals)
{
if(e >= l || X.size() <= 1)
{
if(X.isEmpty())//super rare, rng guesses the min value itself
this.size = 1;
            else//use the total weight of the points that reached this leaf
this.size = X.stream().mapToDouble(s->source.getWeight(s)).sum();
//else, size stays zero
return;
}
//else
int D = source.getNumNumericalVars();
Arrays.fill(minVals, 0.0);
Arrays.fill(maxVals, 0.0);
//find the min-max range for each feature
X.stream().forEach(d->
{
for(IndexValue iv : source.getDataPoint(d).getNumericalValues())
{
int i = iv.getIndex();
minVals[i] = Math.min(minVals[i], iv.getValue());
maxVals[i] = Math.max(maxVals[i], iv.getValue());
}
});
//how many features are valid choices?
            int candidates = 0;
            for(int i = 0; i < D; i++)
                if(minVals[i] != maxVals[i])
                    candidates++;
            //select the q'th feature with a non-zero spread
            int q_candidate = RandomUtil.getLocalRandom().nextInt(candidates);
            int q = 0;
            for(int i = 0; i < D; i++)
                if(minVals[i] != maxVals[i])
                    if(q_candidate-- == 0)//post-decrement so the first valid feature can also be chosen
                    {
                        q = i;
                        break;
                    }
//pick random split value between min & max
splitVal = RandomUtil.getLocalRandom().nextDouble();
splitVal = minVals[q] + (maxVals[q]-minVals[q])*splitVal;
IntList X_l = new IntList();
IntList X_r = new IntList();
for(int x : X)
if(source.getDataPoint(x).getNumericalValues().get(q) < splitVal)
X_l.add(x);
else
X_r.add(x);
splitAtt = q;
this.leftChild = new iTreeNode();
this.leftChild.build(e+1, l, source, X_l, minVals, maxVals);
this.rightChild = new iTreeNode();
this.rightChild.build(e+1, l, source, X_r, minVals, maxVals);
}
public double pathLength(Vec x, double e)
{
            if(leftChild == null)//external (leaf) node
return e + c(this.size);
//else
if(x.get(splitAtt) < splitVal)
return leftChild.pathLength(x, e+1);
else
return rightChild.pathLength(x, e+1);
}
}
}
| 6,553 | 30.358852 | 102 | java |
JSAT | JSAT-master/JSAT/src/jsat/outlier/LOF.java | /*
* Copyright (C) 2018 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.outlier;
import java.util.ArrayList;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.vectorcollection.DefaultVectorCollection;
import jsat.linear.vectorcollection.VectorCollection;
import jsat.utils.DoubleList;
import jsat.utils.IntList;
import jsat.utils.concurrent.ParallelUtils;
/**
* This class implements the Local Outlier Factor (LOF) algorithm for outlier detection.
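 * <br><br>
 * A minimal usage sketch (illustrative; {@code trainingSet} and {@code somePoint}
 * are hypothetical):
 * <pre>{@code
 * LOF lof = new LOF(15, new EuclideanDistance());
 * lof.fit(trainingSet, false);
 * double s = lof.score(somePoint);   // negative values indicate likely outliers
 * }</pre>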
* @author Edward Raff <[email protected]>
*/
public class LOF implements Outlier
{
int minPnts;
private DistanceMetric distanceMetric;
VectorCollection<Vec> vc = new DefaultVectorCollection<>();
/**
* the points in this collection
*/
private List<Vec> X;
/**
     * Stores the distance of an index in X to its k'th (minPnts) nearest neighbor.
*/
private double[] k_distance;
private double[] lrd_internal;
public LOF()
{
this(15);
}
public LOF(int minPnts)
{
this(minPnts, new EuclideanDistance());
}
public LOF(int minPnts, DistanceMetric dm)
{
setMinPnts(minPnts);
setDistanceMetric(dm);
}
public void setMinPnts(int minPnts)
{
this.minPnts = minPnts;
}
public int getMinPnts()
{
return minPnts;
}
public void setDistanceMetric(DistanceMetric distanceMetric)
{
this.distanceMetric = distanceMetric;
}
public DistanceMetric getDistanceMetric()
{
return distanceMetric;
}
@Override
public void fit(DataSet d, boolean parallel)
{
X = d.getDataVectors();
vc.build(parallel, X, distanceMetric);
int N = X.size();
k_distance = new double[N];
List<List<Integer>> all_knn = new ArrayList<>();
List<List<Double>> all_knn_dists = new ArrayList<>();
vc.search(X, minPnts+1, all_knn, all_knn_dists, parallel);//+1 to avoid self distance
ParallelUtils.run(parallel, N, (start, end)->
{
for(int i = start; i < end; i++)
k_distance[i] = all_knn_dists.get(i).get(minPnts);
});
lrd_internal = new double[N];
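        //local reachability density: the inverse of the mean reachability distance from a point to
        //its minPnts nearest neighbors, where reach-dist(i,j) = max(k_distance[j], dist(i,j))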
ParallelUtils.run(parallel, N, (start, end)->
{
for(int i = start; i < end; i++)
{
double reachSum = 0;
for(int j_indx = 1; j_indx < minPnts+1; j_indx++)
{
int neighbor = all_knn.get(i).get(j_indx);
double dist = all_knn_dists.get(i).get(j_indx);
reachSum += Math.max(k_distance[neighbor], dist);
}
//lrd_internal[i] = 1.0/(reachSum/minPnts);
lrd_internal[i] = minPnts/reachSum;
}
});
}
double lrd(Vec a, List<Double> qi)
{
return 0;
}
@Override
public double score(DataPoint x)
{
IntList knn = new IntList(minPnts);
DoubleList dists = new DoubleList(minPnts);
vc.search(x.getNumericalValues(), minPnts, knn, dists);
double lof = 0;
double lrd_x = 0;
for(int i_indx = 0; i_indx < minPnts; i_indx++)
{
int neighbor = knn.get(i_indx);
double dist = dists.get(i_indx);
double reach_dist = Math.max(k_distance[neighbor], dist);
lof += lrd_internal[neighbor];
lrd_x += reach_dist;
}
        //convert the summed reachability distances into the local reachability density of the query x
lrd_x = minPnts/lrd_x;
        //now compute the final LOF score
lof /= minPnts * lrd_x;
//lof, > 1 indicates outlier, <= 1 indicates inlier.
//to map to interface (negative = outlier), -1*(lof-1)
        //use -1.25 b/c the border around 1 is kinda noisy
return -(lof-1.25);
}
}
| 4,842 | 27.321637 | 93 | java |
JSAT | JSAT-master/JSAT/src/jsat/outlier/LinearOCSVM.java | /*
* Copyright (C) 2018 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.outlier;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseVector;
import jsat.linear.ScaledVector;
import jsat.linear.Vec;
import jsat.math.optimization.stochastic.AdaGrad;
import jsat.math.optimization.stochastic.GradientUpdater;
import jsat.utils.random.RandomUtil;
/**
* This class implements the One-Class SVM (OC-SVM) algorithm for outlier
* detection. This implementation works only in the primal or "linear" space. As
* such it works best when the data is sparse and high dimensional. If your data
* is dense and low dimensional, you may get better results by first applying a
* non-linear transformation to the data.
*
*
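 * A minimal usage sketch (illustrative only; the data set and query point are
 * assumed to exist elsewhere):
 * <pre>{@code
 * LinearOCSVM ocsvm = new LinearOCSVM();
 * ocsvm.setV(0.05);              // roughly the tolerated fraction of outliers
 * ocsvm.fit(trainingSet, false); // fit single-threaded
 * boolean outlier = ocsvm.isOutlier(queryPoint);
 * }</pre>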
* See:
* <ul>
* <li>Schölkopf, B., Williamson, R., Smola, A., Shawe-Taylor, J., & Platt, J.
* (1999). <i>Support Vector Method for Novelty Detection</i>. In Advances in
* Neural Information Processing Systems 12 (pp. 582–588). Denver, CO.</li>
* <li>Manevitz, L. M., & Yousef, M. (2001). <i>One-class Svms for Document
* Classification</i>. J. Mach. Learn. Res., 2, 139–154. Retrieved from
* http://dl.acm.org/citation.cfm?id=944790.944808</li>
* </ul>
*
* @author Edward Raff <[email protected]>
*/
public class LinearOCSVM implements Outlier
{
private Vec w;
private double p;
private int max_epochs = 100;
private double learningRate = 0.01;
private double v = 0.05;
public void setV(double v)
{
this.v = v;
}
public double getV()
{
return v;
}
@Override
public void fit(DataSet d, boolean parallel)
{
Random rand = RandomUtil.getRandom();
List<Vec> X = d.getDataVectors();
int N = X.size();
w = new ScaledVector(new DenseVector(X.get(0).length()));
p = 0;
GradientUpdater gu = new AdaGrad();
gu.setup(w.length());
double cnt = 1/(v);
double prevLoss = Double.POSITIVE_INFINITY;
double curLoss = Double.POSITIVE_INFINITY;
for(int epoch = 0; epoch < max_epochs; epoch++)
{
Collections.shuffle(X, rand);
prevLoss = curLoss;
curLoss = 0;
for(int i = 0; i < X.size(); i++)
{
Vec x = X.get(i);
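                //one-class hinge loss: max(0, p - w.x). A violating point (w.x < p) pushes w toward x
                //and lowers the offset p (see x_mul and p_delta below), scaled by cnt = 1/v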
double loss = p - w.dot(x);
double p_delta = -1;
double x_mul = 0;
if(loss > 0)
{
p_delta += 1*cnt;
x_mul = -1*cnt;
}
curLoss += Math.max(0, loss);
p -= gu.update(w, new ScaledVector(x_mul, x), learningRate, p, p_delta);
w.mutableMultiply(1-learningRate);
}
// System.out.println("Epoch " + epoch + " " + curLoss + " " + (curLoss-prevLoss)/N);
if(Math.abs((curLoss-prevLoss)/N) < 1e-6*v)
break;//Convergence check
}
}
@Override
public double score(DataPoint x)
{
return w.dot(x.getNumericalValues())-p;
}
}
| 3,995 | 29.738462 | 96 | java |
JSAT | JSAT-master/JSAT/src/jsat/outlier/LoOP.java | /*
* Copyright (C) 2018 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.outlier;
import java.util.ArrayList;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.linear.Vec;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.linear.distancemetrics.EuclideanDistance;
import jsat.linear.vectorcollection.DefaultVectorCollection;
import jsat.linear.vectorcollection.VectorCollection;
import jsat.math.SpecialMath;
import jsat.utils.DoubleList;
import jsat.utils.IntList;
import jsat.utils.concurrent.ParallelUtils;
/**
* This class implements the Local Outlier Probabilities (LoOP) algorithm for outlier detection.
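 * <br><br>
 * A minimal usage sketch (illustrative only; the data set and query point are
 * assumed to exist elsewhere):
 * <pre>{@code
 * LoOP loop = new LoOP(20);          // use the 20 nearest neighbors
 * loop.fit(trainingSet);
 * double s = loop.score(queryPoint); // s < 0 suggests an outlier
 * }</pre>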
* @author Edward Raff <[email protected]>
*/
public class LoOP implements Outlier
{
int minPnts;
private double lambda = 3;
private DistanceMetric distanceMetric;
VectorCollection<Vec> vc = new DefaultVectorCollection<>();
/**
* Stores the "standard distance" of an index in X to its nearest neighbors
*/
private double[] standard_distance;
private double nPLOF;
public LoOP()
{
this(20);
}
public LoOP(int minPnts)
{
this(minPnts, new EuclideanDistance());
}
public LoOP(int minPnts, DistanceMetric dm)
{
setMinPnts(minPnts);
setDistanceMetric(dm);
}
public void setMinPnts(int minPnts)
{
this.minPnts = minPnts;
}
public int getMinPnts()
{
return minPnts;
}
public void setLambda(double lambda)
{
this.lambda = lambda;
}
public double getLambda()
{
return lambda;
}
public void setDistanceMetric(DistanceMetric distanceMetric)
{
this.distanceMetric = distanceMetric;
}
public DistanceMetric getDistanceMetric()
{
return distanceMetric;
}
@Override
public void fit(DataSet d, boolean parallel)
{
List<Vec> X = d.getDataVectors();
vc.build(parallel, X, distanceMetric);
int N = X.size();
standard_distance = new double[N];
List<List<Integer>> all_knn = new ArrayList<>();
List<List<Double>> all_knn_dists = new ArrayList<>();
vc.search(X, minPnts+1, all_knn, all_knn_dists, parallel);//+1 to avoid self distance
ParallelUtils.run(parallel, N, (start, end)->
{
for(int i = start; i < end; i++)
standard_distance[i] = Math.sqrt(all_knn_dists.get(i).stream()
.mapToDouble(z->z*z).sum()/minPnts+1e-6);
});
double[] plof_internal = new double[N];
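        //PLOF of a point: its standard distance over the mean standard distance of its minPnts
        //neighbors, minus 1. nPLOF is the root-mean-square of these values over the whole data set.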
nPLOF = ParallelUtils.run(parallel, N, (start, end)->
{
double sqrdPLOF = 0;
for(int i = start; i < end; i++)
{
double neighborSD = 0;
for(int j_indx = 1; j_indx < minPnts+1; j_indx++)
{
int neighbor = all_knn.get(i).get(j_indx);
neighborSD += standard_distance[neighbor];
}
plof_internal[i] = standard_distance[i]/(neighborSD/minPnts) - 1;
sqrdPLOF += plof_internal[i]*plof_internal[i];
}
return sqrdPLOF;
}, (a,b)->a+b);
nPLOF = Math.sqrt(nPLOF/N);
}
@Override
public double score(DataPoint x)
{
IntList knn = new IntList(minPnts);
DoubleList dists = new DoubleList(minPnts);
vc.search(x.getNumericalValues(), minPnts, knn, dists);
double e_pdist = 0;
double stndDist_q = 0;
for(int i_indx = 0; i_indx < minPnts; i_indx++)
{
int neighbor = knn.get(i_indx);
double dist = dists.get(i_indx);
e_pdist += standard_distance[neighbor];
stndDist_q += dist*dist;
}
        //stndDist_q now holds the standard distance of the query x
stndDist_q = Math.sqrt(stndDist_q/minPnts+1e-6);
//normalize pdist of neighbors
e_pdist /= minPnts;
double plof_os = stndDist_q/e_pdist - 1;
double loop = Math.max(0, SpecialMath.erf(plof_os/(lambda * nPLOF * Math.sqrt(2))));
//loop, > 1/2 indicates outlier, <= 1/2 indicates inlier.
//to map to interface (negative = outlier), -1*(loop-1/2)
return -(loop-0.5);
}
}
| 5,236 | 27.308108 | 97 | java |
JSAT | JSAT-master/JSAT/src/jsat/outlier/Outlier.java | /*
* Copyright (C) 2018 Edward Raff <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.outlier;
import java.io.Serializable;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
/**
*
* @author Edward Raff <[email protected]>
*/
public interface Outlier extends Serializable
{
default public void fit(DataSet d)
{
fit(d, false);
}
public void fit(DataSet d, boolean parallel);
/**
* Returns an unbounded anomaly/outlier score. Negative values indicate the
* input is likely to be an outlier, and positive values that the input is
* likely to be an inlier.
*
     * @param x the data point to score
     * @return the anomaly score; more negative values indicate a more likely outlier
*/
public double score(DataPoint x);
default public boolean isOutlier(DataPoint x)
{
return score(x) < 0 ;
}
}
| 1,453 | 27.509804 | 79 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/BooleanParameter.java | package jsat.parameters;
/**
* A boolean parameter that may be altered.
*
* @author Edward Raff
*/
public abstract class BooleanParameter extends Parameter
{
private static final long serialVersionUID = 4961692453234546675L;
/**
* Returns the current value for the parameter.
* @return the value for this parameter.
*/
abstract public boolean getValue();
/**
* Sets the value for this parameter.
* @return <tt>true</tt> if the value was set, <tt>false</tt> if the value
* was invalid, and thus ignored.
*/
abstract public boolean setValue(boolean val);
@Override
public String getValueString()
{
return Boolean.toString(getValue());
}
}
| 735 | 22 | 79 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/DecayRateParameter.java | package jsat.parameters;
import java.util.Arrays;
import java.util.List;
import jsat.math.decayrates.DecayRate;
import jsat.math.decayrates.ExponetialDecay;
import jsat.math.decayrates.InverseDecay;
import jsat.math.decayrates.LinearDecay;
import jsat.math.decayrates.NoDecay;
/**
* A parameter for changing between the default {@link DecayRate decay rates}.
*
* @author Edward Raff
*/
public abstract class DecayRateParameter extends ObjectParameter<DecayRate>
{
private static final long serialVersionUID = -3751128637789053385L;
@Override
public List<DecayRate> parameterOptions()
{
return Arrays.asList(new NoDecay(), new LinearDecay(), new ExponetialDecay(), new InverseDecay());
}
@Override
public String getASCIIName()
{
return "Decay Rate";
}
}
| 811 | 22.882353 | 106 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/DoubleParameter.java | package jsat.parameters;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.distributions.Distribution;
import jsat.regression.RegressionDataSet;
/**
* A double parameter that may be altered.
*
* @author Edward Raff
*/
public abstract class DoubleParameter extends Parameter
{
private static final long serialVersionUID = 4132422231433472554L;
/**
* Returns the current value for the parameter.
*
* @return the value for this parameter.
*/
abstract public double getValue();
/**
* Sets the value for this parameter.
* @return <tt>true</tt> if the value was set, <tt>false</tt> if the value
* was invalid, and thus ignored.
*/
abstract public boolean setValue(double val);
/**
* This method allows one to obtain a distribution that represents a
* reasonable "guess" at the range of values that would work for this
* parameter. If the DataSet is an instance of {@link ClassificationDataSet}
* or {@link RegressionDataSet}, the method may choose to assume that the
* value is being guessed for the specified task and change its behavior<br>
* <br>
* Providing a getGuess is not required, and returns {@code null} if
* guessing is not supported.
*
* @param data the data with which we want a reasonable guess for this
* parameter
* @return a distribution that represents a reasonable guess of a good value
* for this parameter given the input data
*/
public Distribution getGuess(DataSet data)
{
return null;
}
@Override
public String getValueString()
{
return Double.toString(getValue());
}
}
| 1,722 | 28.706897 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/GridSearch.java | package jsat.parameters;
import java.util.*;
import java.util.concurrent.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.classifiers.*;
import jsat.distributions.Distribution;
import jsat.exceptions.FailedToFitException;
import jsat.regression.*;
import jsat.utils.DoubleList;
import jsat.utils.FakeExecutor;
import jsat.utils.concurrent.ParallelUtils;
/**
* GridSearch is a simple method for tuning the parameters of a classification
 * or regression algorithm. It naively tries all possible combinations of the
 * parameter values given. For this reason, it works best when only a small
 * number of parameters need to be tuned. <br>
 * The model it takes must implement the {@link Parameterized} interface. By
 * default, no parameters are selected for optimization. This is because
 * parameter value ranges are often algorithm specific. As such, the user must
* specify the parameters and the values to test using the <tt>addParameter</tt>
* methods.
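 * <br><br>
 * A minimal usage sketch (illustrative only; the base classifier, parameter
 * name, and candidate values shown are assumptions that must match the model
 * actually being tuned):
 * <pre>{@code
 * GridSearch search = new GridSearch(baseClassifier, 5); // 5-fold cross-validation
 * search.addParameter("Lambda", 1e-4, 1e-3, 1e-2, 1e-1);
 * search.train(trainingSet, true);
 * Classifier tuned = search.getTrainedClassifier();
 * }</pre>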
*
* @author Edward Raff
*
* @see #addParameter(jsat.parameters.DoubleParameter, double[])
* @see #addParameter(jsat.parameters.IntParameter, int[])
*/
public class GridSearch extends ModelSearch
{
private static final long serialVersionUID = -1987196172499143753L;
/**
* The matching list of values we will test. This includes the integer
* parameters, which will have to be cast back and forth from doubles.
*/
private List<List<Double>> searchValues;
/**
* Use warm starts when possible
*/
private boolean useWarmStarts = true;
/**
* Creates a new GridSearch to tune the specified parameters of a regression
* model. The parameters still need to be specified by calling
* {@link #addParameter(jsat.parameters.DoubleParameter, double[]) }
*
* @param baseRegressor the regressor to tune the parameters of
* @param folds the number of folds of cross-validation to perform to
* evaluate each combination of parameters
* @throws FailedToFitException if the base regressor does not implement
* {@link Parameterized}
*/
public GridSearch(Regressor baseRegressor, int folds)
{
super(baseRegressor, folds);
searchValues = new ArrayList<List<Double>>();
}
/**
* Creates a new GridSearch to tune the specified parameters of a
* classification model. The parameters still need to be specified by
* calling
* {@link #addParameter(jsat.parameters.DoubleParameter, double[]) }
*
* @param baseClassifier the classifier to tune the parameters of
* @param folds the number of folds of cross-validation to perform to
* evaluate each combination of parameters
* @throws FailedToFitException if the base classifier does not implement
* {@link Parameterized}
*/
public GridSearch(Classifier baseClassifier, int folds)
{
super(baseClassifier, folds);
searchValues = new ArrayList<List<Double>>();
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public GridSearch(GridSearch toCopy)
{
super(toCopy);
this.useWarmStarts = toCopy.useWarmStarts;
if(toCopy.searchValues != null)
{
this.searchValues = new ArrayList<List<Double>>();
for(List<Double> ld : toCopy.searchValues)
{
List<Double> newVals = new DoubleList(ld);
this.searchValues.add(newVals);
}
}
}
/**
* This method will automatically populate the search space with parameters
* based on which Parameter objects return non-null distributions. Each
* parameter will be tested with 10 different values<br>
* <br>
* Note, using this method with Cross Validation has the potential for
* over-estimating the accuracy of results if the data set is actually used
* to for parameter guessing.<br>
* <br>
* It is possible for this method to return 0, indicating that no default
* parameters could be found. The intended interpretation is that there are
* no parameters that you <i>need</i> to tune to get good performance from
* the given model. Though there will be cases where the author has simply
* missed a class.
*
* @param data the data set to get parameter estimates from
* @return the number of parameters added
*/
public int autoAddParameters(DataSet data)
{
return autoAddParameters(data, 10);
}
/**
* This method will automatically populate the search space with parameters
* based on which Parameter objects return non-null distributions.<br>
* <br>
* Note, using this method with Cross Validation has the potential for
* over-estimating the accuracy of results if the data set is actually used
* to for parameter guessing.
*
* @param data the data set to get parameter estimates from
* @param paramsEach the number of parameters value to try for each parameter found
* @return the number of parameters added
*/
public int autoAddParameters(DataSet data, int paramsEach)
{
Parameterized obj;
if(baseClassifier != null)
obj = (Parameterized) baseClassifier;
else
obj = (Parameterized) baseRegressor;
int totalParms = 0;
for(Parameter param : obj.getParameters())
{
Distribution dist;
if (param instanceof DoubleParameter)
{
dist = ((DoubleParameter) param).getGuess(data);
if (dist != null)
totalParms++;
}
else if (param instanceof IntParameter)
{
dist = ((IntParameter) param).getGuess(data);
if (dist != null)
totalParms++;
}
}
if(totalParms < 1)
return 0;
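        //evenly spaced quantiles in (0, 1): e.g. paramsEach = 3 gives 0.25, 0.5, 0.75. Each quantile
        //is mapped through a parameter's guess distribution via invCdf below to pick candidate values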
double[] quantiles = new double[paramsEach];
for(int i = 0; i < quantiles.length; i++)
quantiles[i] = (i+1.0)/(paramsEach+1.0);
for(Parameter param : obj.getParameters())
{
Distribution dist;
if (param instanceof DoubleParameter)
{
dist = ((DoubleParameter) param).getGuess(data);
if (dist == null)
continue;
double[] vals = new double[paramsEach];
for (int i = 0; i < vals.length; i++)
vals[i] = dist.invCdf(quantiles[i]);
addParameter((DoubleParameter) param, vals);
}
else if (param instanceof IntParameter)
{
dist = ((IntParameter) param).getGuess(data);
if (dist == null)
continue;
int[] vals = new int[paramsEach];
for (int i = 0; i < vals.length; i++)
vals[i] = (int) Math.round(dist.invCdf(quantiles[i]));
addParameter((IntParameter) param, vals);
}
}
return totalParms;
}
/**
* Sets whether or not warm starts are used, but only if the model in use
* supports warm starts. This is set to {@code true} by default.
*
* @param useWarmStarts {@code true} if warm starts should be used when
* possible, {@code false} otherwise.
*/
public void setUseWarmStarts(boolean useWarmStarts)
{
this.useWarmStarts = useWarmStarts;
}
/**
*
* @return {@code true} if warm starts will be used when possible.
* {@code false} if they will not.
*/
public boolean isUseWarmStarts()
{
return useWarmStarts;
}
/**
* Adds a new double parameter to be altered for the model being tuned.
*
* @param param the model parameter
* @param initialSearchValues the values to try for the specified parameter
*/
public void addParameter(DoubleParameter param, double... initialSearchValues)
{
if(param == null)
throw new IllegalArgumentException("null not allowed for parameter");
searchParams.add(param);
DoubleList dl = new DoubleList(initialSearchValues.length);
for(double d : initialSearchValues)
dl.add(d);
        Arrays.sort(dl.getBackingArray());//convenience, only really needed if param is warm
        if (param.isWarmParameter() && !param.preferredLowToHigh())
            Collections.reverse(dl);//put it in the preferred order
if (param.isWarmParameter())//put it at the front!
searchValues.add(0, dl);
else
searchValues.add(dl);
}
/**
* Adds a new double parameter to be altered for the model being tuned.
*
* @param name the name of the parameter
* @param initialSearchValues the values to try for the specified parameter
*/
public void addParameter(String name, double... initialSearchValues)
{
Parameter param;
param = getParameterByName(name);
if (!(param instanceof DoubleParameter))
throw new IllegalArgumentException("Parameter " + name + " is not for double values");
addParameter((DoubleParameter) param, initialSearchValues);
}
/**
* Adds a new int parameter to be altered for the model being tuned.
*
* @param param the model parameter
* @param initialSearchValues the values to try for the specified parameter
*/
public void addParameter(IntParameter param, int... initialSearchValues)
{
searchParams.add(param);
DoubleList dl = new DoubleList(initialSearchValues.length);
for(double d : initialSearchValues)
dl.add(d);
        Arrays.sort(dl.getBackingArray());//convenience, only really needed if param is warm
        if (param.isWarmParameter() && !param.preferredLowToHigh())
            Collections.reverse(dl);//put it in the preferred order
if (param.isWarmParameter())//put it at the front!
searchValues.add(0, dl);
else
searchValues.add(dl);
}
/**
* Adds a new integer parameter to be altered for the model being tuned.
*
* @param name the name of the parameter
* @param initialSearchValues the values to try for the specified parameter
*/
public void addParameter(String name, int... initialSearchValues)
{
Parameter param;
param = getParameterByName(name);
if (!(param instanceof IntParameter))
throw new IllegalArgumentException("Parameter " + name + " is not for int values");
addParameter((IntParameter) param, initialSearchValues);
}
@Override
public void train(final RegressionDataSet dataSet, final boolean parallel)
{
final PriorityQueue<RegressionModelEvaluation> bestModels
= new PriorityQueue<>(folds, (RegressionModelEvaluation t, RegressionModelEvaluation t1) ->
{
double v0 = t.getScoreStats(regressionTargetScore).getMean();
double v1 = t1.getScoreStats(regressionTargetScore).getMean();
int order = regressionTargetScore.lowerIsBetter() ? 1 : -1;
return order * Double.compare(v0, v1);
});
/**
         * Use this to keep track of which parameter we are altering. The index
         * corresponds to the parameter, and its value indicates which candidate
         * value is currently in use. Increment-and-carry counting iterates over
         * all possible combinations.
*/
int[] setTo = new int[searchParams.size()];
/**
* Each model is set to have different combination of parameters. We
* then train each model to determine the best one.
*/
final List<Regressor> paramsToEval = new ArrayList<Regressor>();
while(true)
{
setParameters(setTo);
paramsToEval.add(baseRegressor.clone());
if(incrementCombination(setTo))
break;
}
//if we are doing our CV splits ahead of time, get them done now
final List<RegressionDataSet> preFolded;
/**
* Pre-combine our training combinations so that any caching can be
* re-used
*/
final List<RegressionDataSet> trainCombinations;
if (reuseSameCVFolds)
{
preFolded = dataSet.cvSet(folds);
trainCombinations = new ArrayList<>(preFolded.size());
for (int i = 0; i < preFolded.size(); i++)
trainCombinations.add(RegressionDataSet.comineAllBut(preFolded, i));
}
else
{
preFolded = null;
trainCombinations = null;
}
boolean considerWarm = useWarmStarts && baseRegressor instanceof WarmRegressor;
/**
* make sure we don't do a warm start if its only supported when trained
* on the same data but we aren't reuse-ing the same CV splits So we get
* the truth table
*
* a | b | (a&&b)||¬a
* T | T | T
* T | F | F
* F | T | T
* F | F | T
*
* where a = warmFromSameDataOnly and b = reuseSameSplit
* So we can instead use
* ¬ a || b
*/
if (considerWarm && (!((WarmRegressor) baseRegressor).warmFromSameDataOnly() || reuseSameCVFolds))
{
            /* we want all of the first parameter (which is the warm parameter,
             * taken care of for us) values done in a group. So we can get this
* by just dividing up the larger list into sub lists, each sub list
* is adjacent in the original and is the number of parameter values
* we wanted to try
*/
ParallelUtils.run(parallel && trainModelsInParallel, paramsToEval.size(), (start, end)->
{
final List<Regressor> subSet = paramsToEval.subList(start, end);
Regressor[] prevModels = null;
for (Regressor r : subSet)
{
RegressionModelEvaluation rme = new RegressionModelEvaluation(r, dataSet, !trainModelsInParallel && parallel);
rme.setKeepModels(true);//we need these to do warm starts!
rme.setWarmModels(prevModels);
rme.addScorer(regressionTargetScore.clone());
if (reuseSameCVFolds)
rme.evaluateCrossValidation(preFolded, trainCombinations);
else
rme.evaluateCrossValidation(folds);
prevModels = rme.getKeptModels();
synchronized (bestModels)
{
bestModels.add(rme);
}
}
});
}
else//regular CV, train a new model from scratch at every step
{
ParallelUtils.run(parallel && trainModelsInParallel, paramsToEval.size(), (indx)->
{
Regressor r = paramsToEval.get(indx);
RegressionModelEvaluation rme = new RegressionModelEvaluation(r, dataSet, !trainModelsInParallel && parallel);
rme.addScorer(regressionTargetScore.clone());
if (reuseSameCVFolds)
rme.evaluateCrossValidation(preFolded, trainCombinations);
else
rme.evaluateCrossValidation(folds);
synchronized (bestModels)
{
bestModels.add(rme);
}
});
}
        //Now we know the best regressor, we need to train one on the whole data set.
Regressor bestRegressor = bestModels.peek().getRegressor();//Just re-train it on the whole set
if (trainFinalModel)
{
//try and warm start the final model if we can
if (useWarmStarts && bestRegressor instanceof WarmRegressor
&& !((WarmRegressor) bestRegressor).warmFromSameDataOnly())//last line here needed to make sure we can do this warm train
{
WarmRegressor wr = (WarmRegressor) bestRegressor;
wr.train(dataSet, wr.clone(), parallel);
}
else
{
bestRegressor.train(dataSet, parallel);
}
}
trainedRegressor = bestRegressor;
}
@Override
public void train(final ClassificationDataSet dataSet, final boolean parallel)
{
final PriorityQueue<ClassificationModelEvaluation> bestModels
= new PriorityQueue<>(folds, (ClassificationModelEvaluation t, ClassificationModelEvaluation t1) ->
{
double v0 = t.getScoreStats(classificationTargetScore).getMean();
double v1 = t1.getScoreStats(classificationTargetScore).getMean();
int order = classificationTargetScore.lowerIsBetter() ? 1 : -1;
return order * Double.compare(v0, v1);
});
/**
         * Use this to keep track of which parameter we are altering. The index
         * corresponds to the parameter, and its value indicates which candidate
         * value is currently in use. Increment-and-carry counting iterates over
         * all possible combinations.
*/
int[] setTo = new int[searchParams.size()];
/**
* Each model is set to have different combination of parameters. We
* then train each model to determine the best one.
*/
final List<Classifier> paramsToEval = new ArrayList<Classifier>();
while(true)
{
setParameters(setTo);
paramsToEval.add(baseClassifier.clone());
if(incrementCombination(setTo))
break;
}
//if we are doing our CV splits ahead of time, get them done now
final List<ClassificationDataSet> preFolded;
/**
* Pre-combine our training combinations so that any caching can be
* re-used
*/
final List<ClassificationDataSet> trainCombinations;
if (reuseSameCVFolds)
{
preFolded = dataSet.cvSet(folds);
trainCombinations = new ArrayList<>(preFolded.size());
for (int i = 0; i < preFolded.size(); i++)
trainCombinations.add(ClassificationDataSet.comineAllBut(preFolded, i));
}
else
{
preFolded = null;
trainCombinations = null;
}
boolean considerWarm = useWarmStarts && baseClassifier instanceof WarmClassifier;
/**
* make sure we don't do a warm start if its only supported when trained
* on the same data but we aren't reuse-ing the same CV splits So we get
* the truth table
*
* a | b | (a&&b)||¬a
* T | T | T
* T | F | F
* F | T | T
* F | F | T
*
* where a = warmFromSameDataOnly and b = reuseSameSplit
* So we can instead use
* ¬ a || b
*/
if (considerWarm && (!((WarmClassifier) baseClassifier).warmFromSameDataOnly() || reuseSameCVFolds))
{
            /* we want all of the first parameter (which is the warm parameter,
             * taken care of for us) values done in a group. So we can get this
* by just dividing up the larger list into sub lists, each sub list
* is adjacent in the original and is the number of parameter values
* we wanted to try
*/
ParallelUtils.run(parallel && trainModelsInParallel, paramsToEval.size(), (start, end)->
{
final List<Classifier> subSet = paramsToEval.subList(start, end);
Classifier[] prevModels = null;
for (Classifier r : subSet)
{
ClassificationModelEvaluation cme = new ClassificationModelEvaluation(r, dataSet, !trainModelsInParallel && parallel);
cme.setKeepModels(true);//we need these to do warm starts!
cme.setWarmModels(prevModels);
cme.addScorer(classificationTargetScore.clone());
if (reuseSameCVFolds)
cme.evaluateCrossValidation(preFolded, trainCombinations);
else
cme.evaluateCrossValidation(folds);
prevModels = cme.getKeptModels();
synchronized (bestModels)
{
bestModels.add(cme);
}
}
});
}
else//regular CV, train a new model from scratch at every step
{
ParallelUtils.run(parallel && trainModelsInParallel, paramsToEval.size(), (int indx)->
{
Classifier toTrain = paramsToEval.get(indx);
ClassificationModelEvaluation cme = new ClassificationModelEvaluation(toTrain, dataSet, !trainModelsInParallel && parallel);
cme.addScorer(classificationTargetScore.clone());
if (reuseSameCVFolds)
cme.evaluateCrossValidation(preFolded, trainCombinations);
else
cme.evaluateCrossValidation(folds);
synchronized (bestModels)
{
bestModels.add(cme);
}
});
}
//Now we know the best classifier, we need to train one on the whole data set.
Classifier bestClassifier = bestModels.peek().getClassifier();//Just re-train it on the whole set
if (trainFinalModel)
{
//try and warm start the final model if we can
if (useWarmStarts && bestClassifier instanceof WarmClassifier
&& !((WarmClassifier) bestClassifier).warmFromSameDataOnly())//last line here needed to make sure we can do this warm train
{
WarmClassifier wc = (WarmClassifier) bestClassifier;
wc.train(dataSet, wc.clone(), parallel);
}
else
{
bestClassifier.train(dataSet, parallel);
}
}
trainedClassifier = bestClassifier;
}
@Override
public GridSearch clone()
{
return new GridSearch(this);
}
/**
* This increments the array used to keep track of which combinations of
* parameter values have been used.
* @param setTo the array of length equal to {@link #searchParams}
* indicating which parameters have already been tried
* @return a boolean indicating <tt>true</tt> if all combinations have been
* tried, or <tt>false</tt> if combinations remain to be attempted.
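     * For example, with two parameters having 2 and 3 candidate values, the
     * caller sees the combinations [0,0], [1,0], [0,1], [1,1], [0,2], [1,2]
     * before this method reports that all combinations are exhausted.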
*/
private boolean incrementCombination(int[] setTo)
{
setTo[0]++;
int carryPos = 0;
while(carryPos < setTo.length-1 && setTo[carryPos] >= searchValues.get(carryPos).size())
{
setTo[carryPos] = 0;
setTo[++carryPos]++;
}
return setTo[setTo.length-1] >= searchValues.get(setTo.length-1).size();
}
/**
* Sets the parameters according to the given array
* @param setTo the index corresponds to the parameters, and the value which
* parameter value to use.
*/
private void setParameters(int[] setTo)
{
for(int i = 0; i < setTo.length; i++)
{
Parameter param = searchParams.get(i);
if(param instanceof DoubleParameter)
((DoubleParameter)param).setValue(searchValues.get(i).get(setTo[i]));
else if(param instanceof IntParameter)
((IntParameter)param).setValue(searchValues.get(i).get(setTo[i]).intValue());
}
}
}
| 24,244 | 37.484127 | 143 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/IntParameter.java | package jsat.parameters;
import jsat.DataSet;
import jsat.distributions.Distribution;
/**
* An integer parameter that may be altered.
*
* @author Edward Raff
*/
public abstract class IntParameter extends Parameter
{
private static final long serialVersionUID = -8467918069240345315L;
/**
* Returns the current value for the parameter.
*
* @return the value for this parameter.
*/
abstract public int getValue();
/**
* Sets the value for this parameter.
* @return <tt>true</tt> if the value was set, <tt>false</tt> if the value
* was invalid, and thus ignored.
*/
abstract public boolean setValue(int val);
/**
* This method allows one to obtain a distribution that represents a
* reasonable "guess" at the range of values that would work for this
     * parameter. If the DataSet is an instance of {@link jsat.classifiers.ClassificationDataSet}
     * or {@link jsat.regression.RegressionDataSet}, the method may choose to assume that the
* value is being guessed for the specified task and change its behavior<br>
* <br>
* Providing a getGuess is not required, and returns {@code null} if
* guessing is not supported.
*
* @param data the data with which we want a reasonable guess for this
* parameter
* @return a distribution that represents a reasonable guess of a good value
* for this parameter given the input data
*/
public Distribution getGuess(DataSet data)
{
return null;
}
@Override
public String getValueString()
{
return Integer.toString(getValue());
}
}
| 1,628 | 28.089286 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/KernelFunctionParameter.java |
package jsat.parameters;
import java.util.*;
import jsat.distributions.empirical.kernelfunc.*;
/**
* A default Parameter semi-implementation for classes that require a
* {@link KernelFunction} to be specified.
*
* @author Edward Raff
*/
public abstract class KernelFunctionParameter extends ObjectParameter<KernelFunction>
{
private static final long serialVersionUID = 2100826688956817533L;
private final static List<KernelFunction> kernelFuncs = Collections.unmodifiableList(new ArrayList<KernelFunction>()
{/**
*
*/
private static final long serialVersionUID = 4910454799262834767L;
{
add(UniformKF.getInstance());
add(EpanechnikovKF.getInstance());
add(GaussKF.getInstance());
add(BiweightKF.getInstance());
add(TriweightKF.getInstance());
}});
@Override
public List<KernelFunction> parameterOptions()
{
return kernelFuncs;
}
@Override
public String getASCIIName()
{
return "Kernel Function";
}
}
| 1,024 | 22.837209 | 117 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/MetricParameter.java | package jsat.parameters;
import jsat.linear.distancemetrics.DistanceMetric;
/**
* A MetricParameter is a parameter controller for the {@link DistanceMetric}
* used by the current algorithm.
*
* @author Edward Raff
*/
public abstract class MetricParameter extends Parameter
{
private static final long serialVersionUID = -8525270531723322719L;
/**
     * Sets the distance metric that should be used
* @param val the distance metric to use
* @return <tt>true</tt> if the metric is valid and was set, <tt>false</tt>
* if the metric was not valid for this learner and ignored.
*/
abstract public boolean setMetric(DistanceMetric val);
/**
* Returns the distance metric that was used for this learner
* @return the current distance metric
*/
abstract public DistanceMetric getMetric();
@Override
public String getASCIIName()
{
return "Distance Metric";
}
@Override
public String getValueString()
{
return getMetric().toString();
}
}
| 1,053 | 24.095238 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/ModelSearch.java | /*
* Copyright (C) 2015 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.parameters;
import java.util.ArrayList;
import java.util.List;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.Classifier;
import jsat.classifiers.DataPoint;
import jsat.classifiers.evaluation.Accuracy;
import jsat.classifiers.evaluation.ClassificationScore;
import jsat.exceptions.FailedToFitException;
import jsat.exceptions.UntrainedModelException;
import jsat.regression.Regressor;
import jsat.regression.evaluation.MeanSquaredError;
import jsat.regression.evaluation.RegressionScore;
/**
* This abstract class provides boilerplate for algorithms that search a model's
* parameter space to find the parameters that provide the best overall
* performance.
*
* @author Edward Raff
*/
abstract public class ModelSearch implements Classifier, Regressor
{
protected Classifier baseClassifier;
protected Classifier trainedClassifier;
protected ClassificationScore classificationTargetScore = new Accuracy();
protected RegressionScore regressionTargetScore = new MeanSquaredError(true);
protected Regressor baseRegressor;
protected Regressor trainedRegressor;
/**
* The list of parameters we will search for, currently only Int and Double
* params should be used
*/
protected List<Parameter> searchParams;
/**
* The number of CV folds
*/
protected int folds;
/**
* If true, parallelism will be obtained by training the models in parallel.
* If false, parallelism is obtained from the model itself.
*/
protected boolean trainModelsInParallel = true;
/**
* If true, trains the final model on the parameters used
*/
protected boolean trainFinalModel = true;
/**
* If true, create the CV splits once and re-use them for all parameters
*/
protected boolean reuseSameCVFolds = true;
public ModelSearch(Regressor baseRegressor, int folds)
{
if (!(baseRegressor instanceof Parameterized))
throw new FailedToFitException("Given regressor does not support parameterized alterations");
this.baseRegressor = baseRegressor;
if (baseRegressor instanceof Classifier)
this.baseClassifier = (Classifier) baseRegressor;
searchParams = new ArrayList<Parameter>();
this.folds = folds;
}
public ModelSearch(Classifier baseClassifier, int folds)
{
if (!(baseClassifier instanceof Parameterized))
throw new FailedToFitException("Given classifier does not support parameterized alterations");
this.baseClassifier = baseClassifier;
if (baseClassifier instanceof Regressor)
this.baseRegressor = (Regressor) baseClassifier;
searchParams = new ArrayList<Parameter>();
this.folds = folds;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public ModelSearch(ModelSearch toCopy)
{
if (toCopy.baseClassifier != null)
{
this.baseClassifier = toCopy.baseClassifier.clone();
if (this.baseClassifier instanceof Regressor)
this.baseRegressor = (Regressor) this.baseClassifier;
}
else
{
this.baseRegressor = toCopy.baseRegressor.clone();
if (this.baseRegressor instanceof Classifier)
this.baseClassifier = (Classifier) this.baseRegressor;
}
if (toCopy.trainedClassifier != null)
this.trainedClassifier = toCopy.trainedClassifier.clone();
if (toCopy.trainedRegressor != null)
this.trainedRegressor = toCopy.trainedRegressor.clone();
this.searchParams = new ArrayList<Parameter>();
for (Parameter p : toCopy.searchParams)
this.searchParams.add(getParameterByName(p.getName()));
this.folds = toCopy.folds;
}
/**
* When set to {@code true} (the default) parallelism is obtained by
* training as many models in parallel as possible. If {@code false},
     * parallelism will be obtained by training the model using the
     * {@link Classifier#train(jsat.classifiers.ClassificationDataSet, boolean) } and
     * {@link Regressor#train(jsat.regression.RegressionDataSet, boolean) } methods.<br>
* <br>
     * When a model supports {@link #setUseWarmStarts(boolean) warm starts},
     * parallelism obtained by training the models in parallel is intrinsically
     * reduced, as a model can not be warm started until another model has
     * finished. In the case that one of the parameters is annotated as a
     * {@link Parameter.WarmParameter warm parameter}, that parameter will be
     * the one trained sequentially, and for every other parameter combination
* models will be trained in parallel. If there is no warm parameter, the
* first parameter added will be used for warm training. If there is only
* one parameter and warm training is occurring, no parallelism will be
* obtained.
*
* @param trainInParallel {@code true} to get parallelism from training many
* models at the same time, {@code false} to get parallelism from getting
* the model's implicit parallelism.
*/
public void setTrainModelsInParallel(boolean trainInParallel)
{
this.trainModelsInParallel = trainInParallel;
}
/**
*
* @return {@code true} if parallelism is obtained from training many models
* at the same time, {@code false} if parallelism is obtained from using the
* model's implicit parallelism.
*/
public boolean isTrainModelsInParallel()
{
return trainModelsInParallel;
}
/**
* If {@code true} (the default) the model that was found to be best is
* trained on the whole data set at the end. If {@code false}, the final
* model will not be trained. This means that this Object will not be usable
* for predictoin. This should only be set if you know you will not be using
* this model but only want to get the information about which parameter
* combination is best.
*
* @param trainFinalModel {@code true} to train the final model after grid
* search, {@code false} to not do that.
*/
public void setTrainFinalModel(boolean trainFinalModel)
{
this.trainFinalModel = trainFinalModel;
}
/**
*
* @return {@code true} to train the final model after grid search,
* {@code false} to not do that.
*/
public boolean isTrainFinalModel()
{
return trainFinalModel;
}
/**
* Sets whether or not one set of CV folds is created and re used for every
* parameter combination (the default), or if a difference set of CV folds
* will be used for every parameter combination.
*
* @param reuseSameSplit {@code true} if the same split is re-used for every
* combination, {@code false} if a new CV set is used for every parameter
* combination.
*/
public void setReuseSameCVFolds(boolean reuseSameSplit)
{
this.reuseSameCVFolds = reuseSameSplit;
}
/**
*
* @return {@code true} if the same split is re-used for every combination,
* {@code false} if a new CV set is used for every parameter combination.
*/
public boolean isReuseSameCVFolds()
{
return reuseSameCVFolds;
}
/**
* Returns the base classifier that was originally passed in when
* constructing this GridSearch. If this was not constructed with a
* classifier, this may return null.
*
* @return the original classifier object given
*/
public Classifier getBaseClassifier()
{
return baseClassifier;
}
/**
* Returns the resultant classifier trained on the whole data set after
* performing parameter tuning.
*
     * @return the trained classifier after a call to
     * {@link #train(jsat.classifiers.ClassificationDataSet, boolean) }, or null if it has not been
* trained.
*/
public Classifier getTrainedClassifier()
{
return trainedClassifier;
}
/**
* Returns the base regressor that was originally passed in when
* constructing this GridSearch. If this was not constructed with a
* regressor, this may return null.
*
* @return the original regressor object given
*/
public Regressor getBaseRegressor()
{
return baseRegressor;
}
/**
* Returns the resultant regressor trained on the whole data set after
* performing parameter tuning.
*
     * @return the trained regressor after a call to
     * {@link #train(jsat.regression.RegressionDataSet, boolean) }, or null if it has not been
* trained.
*/
public Regressor getTrainedRegressor()
{
return trainedRegressor;
}
/**
* Sets the score to attempt to optimize when performing grid search on a
* classification problem.
*
* @param classifierTargetScore the score to optimize via grid search
*/
public void setClassificationTargetScore(ClassificationScore classifierTargetScore)
{
this.classificationTargetScore = classifierTargetScore;
}
/**
* Returns the classification score that is trying to be optimized via grid
* search
*
* @return the classification score that is trying to be optimized via grid
* search
*/
public ClassificationScore getClassificationTargetScore()
{
return classificationTargetScore;
}
/**
* Sets the score to attempt to optimize when performing grid search on a
* regression problem.
*
     * @param regressionTargetScore the score to optimize via grid search
*/
public void setRegressionTargetScore(RegressionScore regressionTargetScore)
{
this.regressionTargetScore = regressionTargetScore;
}
/**
* Returns the regression score that is trying to be optimized via grid
* search
*
* @return the regression score that is trying to be optimized via grid
* search
*/
public RegressionScore getRegressionTargetScore()
{
return regressionTargetScore;
}
/**
* Finds the parameter object with the given name, or throws an exception if
* a parameter with the given name does not exist.
*
* @param name the name to search for
* @return the parameter object in question
* @throws IllegalArgumentException if the name is not found
*/
protected Parameter getParameterByName(String name) throws IllegalArgumentException
{
Parameter param;
if (baseClassifier != null)
param = ((Parameterized) baseClassifier).getParameter(name);
else
param = ((Parameterized) baseRegressor).getParameter(name);
if (param == null)
throw new IllegalArgumentException("Parameter " + name + " does not exist");
return param;
}
@Override
public CategoricalResults classify(DataPoint data)
{
if (trainedClassifier == null)
throw new UntrainedModelException("Model has not yet been trained");
return trainedClassifier.classify(data);
}
@Override
public double regress(DataPoint data)
{
if (trainedRegressor == null)
throw new UntrainedModelException("Model has not yet been trained");
return trainedRegressor.regress(data);
}
@Override
public boolean supportsWeightedData()
{
return baseClassifier != null ? baseClassifier.supportsWeightedData() : baseRegressor.supportsWeightedData();
}
@Override
abstract public ModelSearch clone();
}
| 12,439 | 33.845938 | 168 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/ObjectParameter.java | package jsat.parameters;
import java.util.List;
/**
* A parameter that could be one of a finite number of possible objects.
*
* @author Edward Raff
*/
public abstract class ObjectParameter<T> extends Parameter
{
private static final long serialVersionUID = 7639067170001873762L;
/**
* Returns the current object value
* @return the current object set for the parameter
*/
abstract public T getObject();
/**
* Sets the parameter to the given object
* @param obj the new parameter value
* @return <tt>true</tt> if the value was set, <tt>false</tt> if the value
* was invalid, and thus ignored.
*/
abstract public boolean setObject(T obj);
/**
* Returns a list of all possible objects that may be used as a parameter.
* @return a list of all possible objects that may be used as a parameter.
*/
abstract public List<T> parameterOptions();
@Override
public String getValueString()
{
return getObject().toString();
}
}
| 1,051 | 24.047619 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/Parameter.java | package jsat.parameters;
import java.io.Serializable;
import java.lang.annotation.*;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.distributions.Distribution;
import jsat.distributions.empirical.kernelfunc.KernelFunction;
import jsat.linear.distancemetrics.DistanceMetric;
import jsat.math.decayrates.DecayRate;
/**
* This interface provides a programmable manner in which the parameters of an
* algorithm may be altered and adjusted.
*
* @author Edward Raff
*/
public abstract class Parameter implements Serializable
{
private static final long serialVersionUID = -6903844587637472657L;
/**
* Adding this annotation to a field tells the
* {@link #getParamsFromMethods(java.lang.Object)} method to search this
* object recursively for more parameter get/set
* pairs.<br><br>
* Placing this annotation on a {@link Collection} will cause the search to
* be done recursively over each item in the collection.
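     * <br><br>
     * For example (illustrative only; the field shown is hypothetical):
     * <pre>{@code
     *   @ParameterHolder
     *   private DistanceMetric dm; // dm's own get/set pairs become parameters,
     *                              // prefixed with its concrete class's simple name
     * }</pre>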
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public static @interface ParameterHolder
{
boolean skipSelfNamePrefix() default false;
}
/**
* Adding this annotation to a method tells the {@link #getParamsFromMethods(java.lang.Object)
* } method to consider the Parameter object generated for that method a
* preferred parameter for warm starting.
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public static @interface WarmParameter
{
/**
* Indicates the ordering that would be preferred by the algorithm when
* training. <br>
* <br>
* It may be the case that the model has no preference for models to be
* trained from low to high or high to low, in which case any arbitrary
* value may be returned - so long as it consistently returns the same
* value.
*
* @return {@code true} if it is preferred to train from low values of
* the parameter to high values of the parameter. {@code false} is
* returned if it is preferred to go from high to low values.
*/
boolean prefLowToHigh();
}
/**
* Some variables of a learning method may be adjustable without having to
* re-train the whole data set. <tt>false</tt> is returned if this is such a
* parameter, <tt>true</tt> if the learning method will need to be
* retrained after the parameter has changed. <br><br>
* By default, this method returns <tt>true</tt> unless overwritten, as it
* is always safe to retrain the classifier if a parameter was changed.
* @return <tt>true</tt> if changing this parameter requires a re-training
* of the algorithm, or <tt>false</tt> if no-retraining is needed to take
* effect.
*/
public boolean requiresRetrain(){
return true;
};
/**
* If {@code true}, that means this parameter is the preferred parameter to
* be altered when warm starting from a previous version of the same class.
     * Being the preferred parameter means that using warm-started training on
* a sequence of changes to this parameter should be faster than simply
* training normally for every combination of values.<br>
* <br>
* If more than one parameter would have this impact on training, the one
* that has the largest and most consistent impact should be selected.<br>
* <br>
* By default, this method will return {@code false}.
*
* @return {@code true} if warm starting on changes in this parameter has a
* significant impact on training speed, {@code false} otherwise.
*/
public boolean isWarmParameter(){
return false;
};
/**
* This value is meaningless if {@link #isWarmParameter() } is {@code false}
* , and by default returns {@code false}. <br>
* <br>
* This should return the preferred order of warm start value training in
* the progression of the warm parameter, either from low values to high
* values (ie: the model being trained has a higher value for this parameter
* than the warm model being trained from).
*
* @return {@code true} if warm starting on this parameter should occur from
* low values to high values, and {@code false} if it should go from high
* values to low.
*/
public boolean preferredLowToHigh() {
return false;
}
/**
     * Returns the name of this parameter using only valid ASCII characters.
     * @return the ASCII name
*/
abstract public String getASCIIName();
/**
* Returns the display name of this parameter. By default, this returns the
* {@link #getASCIIName() ASCII name} of the parameter. If one exists, a
* name using Unicode characters may be returned instead.
*
* @return the name of this parameter
*/
public String getName()
{
return getASCIIName();
}
@Override
public String toString()
{
return getName();
}
@Override
public int hashCode()
{
return getName().hashCode();
}
/**
* Returns a string indicating the value currently held by the Parameter
*
* @return a string representation of the parameter's value
*/
abstract public String getValueString();
@Override
public boolean equals(Object obj)
{
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
final Parameter other = (Parameter) obj;
return this.getName().equals(other.getName());
}
/**
* Creates a map of all possible parameter names to their corresponding object. No two parameters may have the same name.
* @param params the list of parameters to create a map for
* @return a map of string names to their parameters
* @throws RuntimeException if two parameters have the same name
*/
public static Map<String, Parameter> toParameterMap(List<Parameter> params)
{
Map<String, Parameter> map = new HashMap<String, Parameter>(params.size());
for(Parameter param : params)
{
if(map.put(param.getASCIIName(), param) != null)
throw new RuntimeException("Name collision, two parameters use the name '" + param.getASCIIName() + "'");
            if(!param.getName().equals(param.getASCIIName()))//Don't put it in again
if(map.put(param.getName(), param) != null)
throw new RuntimeException("Name collision, two parameters use the name '" + param.getName() + "'");
}
return map;
}
/**
* Given an object, this method will use reflection to automatically find
* getter and setter method pairs, and create Parameter object for each
* getter setter pair.<br>
* Getters are found by searching for no argument methods that start with
* "get" or "is". Setters are found by searching for one argument methods
* that start with "set".
* A getter and setter are a pair only if everything after the prefix is the
* same in the method's name, and the return type of the getter is the same
* class as the argument for the setter. <br>
* Current types supported are:
* <ul>
* <li>integer</li>
* <li>doubles</li>
* <li>booleans</li>
* <li>{@link KernelFunction Kernel Functions}</li>
* <li>{@link DistanceMetric Distance Metrics}</li>
* <li>{@link Enum Enums}</li>
* </ul>
*
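     * For example (illustrative only; the class below is hypothetical), a
     * bean-style get/set pair yields a single {@link DoubleParameter}:
     * <pre>{@code
     *   public class MyModel {
     *       private double alpha;
     *       public double getAlpha() { return alpha; }
     *       public void setAlpha(double a) { alpha = a; }
     *   }
     *   // getParamsFromMethods(new MyModel()) returns one DoubleParameter named "Alpha"
     * }</pre>
     *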
* @param obj
* @return a list of parameter objects generated from the given object
*/
public static List<Parameter> getParamsFromMethods(final Object obj)
{
return getParamsFromMethods(obj, "");
}
private static List<Parameter> getParamsFromMethods(final Object obj, String prefix)
{
Map<String, Method> getMethods = new HashMap<String, Method>();
Map<String, Method> setMethods = new HashMap<String, Method>();
//Collect potential get/set method pairs
for(Method method : obj.getClass().getMethods())
{
int paramCount = method.getParameterTypes().length;
if(method.isVarArgs() || paramCount > 1)
continue;
String name = method.getName();
if(name.startsWith("get") && paramCount == 0)
getMethods.put(name.substring(3), method);
else if(name.startsWith("is") && paramCount == 0)
getMethods.put(name.substring(2), method);
else if(name.startsWith("set") && paramCount == 1)
setMethods.put(name.substring(3), method);
}
//Find pairings and add to list
List<Parameter> params = new ArrayList<Parameter>(Math.min(getMethods.size(), setMethods.size()));
for(Map.Entry<String, Method> entry : setMethods.entrySet())
{
final Method setMethod = entry.getValue();
final Method getMethod = getMethods.get(entry.getKey());
if(getMethod == null)
continue;
final Class retClass = getMethod.getReturnType();
final Class argClass = entry.getValue().getParameterTypes()[0];
if(!retClass.equals(argClass))
continue;
final String name = spaceCamelCase(entry.getKey());
//Found a match do we know how to handle it?
Parameter param = getParam(obj, argClass, getMethod, setMethod, prefix + name);
if(param != null)
params.add(param);
}
//Find params from field objects
//first get all fields of this object
List<Field> fields = new ArrayList<Field>();
Class curClassLevel = obj.getClass();
while(curClassLevel != null)
{
fields.addAll(Arrays.asList(curClassLevel.getDeclaredFields()));
curClassLevel = curClassLevel.getSuperclass();
}
final String simpleObjName = obj.getClass().getSimpleName();
//For each field, check if it has our magic annotation
for(Field field : fields)
{
Annotation[] annotations = field.getAnnotations();
for(Annotation annotation : annotations)
{
if(annotation.annotationType().equals(ParameterHolder.class))
{
ParameterHolder annotationPH = (ParameterHolder) annotation;
                    //get the field value from the object passed in
try
{
                        //If it's private/protected we are not in the same object chain
field.setAccessible(true);
Object paramHolder = field.get(obj);
                        if(paramHolder instanceof Collection)//search for each item in the collection
{
Collection toSearch = (Collection) paramHolder;
for(Object paramHolderSub : toSearch)
{
String subPreFix = paramHolderSub.getClass().getSimpleName() + "_";
if(annotationPH.skipSelfNamePrefix())
subPreFix = prefix.replace(simpleObjName+"_", "") + subPreFix;
else
subPreFix = prefix + subPreFix;
params.addAll(Parameter.getParamsFromMethods(paramHolderSub, subPreFix));
}
}
else if(paramHolder != null)//search the item directly
{
String subPreFix = paramHolder.getClass().getSimpleName() + "_";
if (annotationPH.skipSelfNamePrefix())
subPreFix = prefix.replace(simpleObjName + "_", "") + subPreFix;
else
subPreFix = prefix + subPreFix;
params.addAll(Parameter.getParamsFromMethods(paramHolder, subPreFix));
}
}
catch (IllegalArgumentException ex)
{
Logger.getLogger(Parameter.class.getName()).log(Level.SEVERE, null, ex);
}
catch (IllegalAccessException ex)
{
Logger.getLogger(Parameter.class.getName()).log(Level.SEVERE, null, ex);
}
}
}
}
return params;
}
private static Parameter getParam(final Object targetObject, final Class varClass, final Method getMethod, final Method setMethod, final String asciiName)
{
return getParam(targetObject, varClass, getMethod, setMethod, asciiName, null);
}
private static Parameter getParam(final Object targetObject, final Class varClass, final Method getMethod, final Method setMethod, final String asciiName, final String uniName)
{
final boolean warm;
final boolean lowToHigh;
        //let's find out if this parameter is a "warm" parameter
Parameter.WarmParameter warmAna = null;
warmAna = setMethod.getAnnotation(Parameter.WarmParameter.class);
if(warmAna == null)
warmAna = getMethod.getAnnotation(Parameter.WarmParameter.class);
if(warmAna != null)
{
warm = true;
lowToHigh = warmAna.prefLowToHigh();
}
else
{
warm = false;
lowToHigh = false;
}
        //let's see if we can find a method that corresponds to "guess" for this parameter
final Method guessMethod;
Method tmp = null;
try
{
tmp = targetObject.getClass().getMethod("guess" + setMethod.getName().replaceFirst("set", ""), DataSet.class);
}
catch (NoSuchMethodException ex)
{
}
catch (SecurityException ex)
{
}
guessMethod = tmp;//ugly hack so that I can reference a final guess method in anon class below
        //ok, now let's go create the correct object type
Parameter param = null;
if (varClass.equals(double.class) || varClass.equals(Double.class))
{
param = new DoubleParameter()
{
private static final long serialVersionUID = -4741218633343565521L;
@Override
public double getValue()
{
try
{
return (Double) getMethod.invoke(targetObject);
}
catch (Exception ex)
{
}
return Double.NaN;
}
@Override
public boolean setValue(double val)
{
try
{
setMethod.invoke(targetObject, val);
return true;
}
catch (Exception ex)
{
}
return false;
}
@Override
public boolean isWarmParameter()
{
return warm;
}
@Override
public boolean preferredLowToHigh()
{
return lowToHigh;
}
@Override
public String getASCIIName()
{
return asciiName;
}
@Override
public String getName()
{
if (uniName == null)
return super.getName();
else
return uniName;
}
@Override
public Distribution getGuess(DataSet data)
{
if (guessMethod == null)
return null;
try
{
return (Distribution) guessMethod.invoke(targetObject, data);
}
catch (Exception ex)
{
}
return null;
}
};
}
else if (varClass.equals(int.class) || varClass.equals(Integer.class))
{
param = new IntParameter()
{
private static final long serialVersionUID = 693593136858174197L;
@Override
public int getValue()
{
try
{
return (Integer) getMethod.invoke(targetObject);
}
catch (Exception ex)
{
}
return -1;
}
@Override
public boolean setValue(int val)
{
try
{
setMethod.invoke(targetObject, val);
return true;
}
catch (Exception ex)
{
}
return false;
}
@Override
public Distribution getGuess(DataSet data)
{
if (guessMethod == null)
return null;
try
{
return (Distribution) guessMethod.invoke(targetObject, data);
}
catch (Exception ex)
{
}
return null;
}
@Override
public boolean isWarmParameter()
{
return warm;
}
@Override
public boolean preferredLowToHigh()
{
return lowToHigh;
}
@Override
public String getASCIIName()
{
return asciiName;
}
@Override
public String getName()
{
if (uniName == null)
return super.getName();
else
return uniName;
}
};
}
else if (varClass.equals(boolean.class) || varClass.equals(Boolean.class))
{
param = new BooleanParameter()
{
private static final long serialVersionUID = 8356074301252766754L;
@Override
public boolean getValue()
{
try
{
return (Boolean) getMethod.invoke(targetObject);
}
catch (Exception ex)
{
}
return false;
}
@Override
public boolean setValue(boolean val)
{
try
{
setMethod.invoke(targetObject, val);
return true;
}
catch (Exception ex)
{
}
return false;
}
@Override
public String getASCIIName()
{
return asciiName;
}
@Override
public String getName()
{
if (uniName == null)
return super.getName();
else
return uniName;
}
};
}
else if (varClass.equals(KernelFunction.class))
{
param = new KernelFunctionParameter()
{
private static final long serialVersionUID = -482809259476649959L;
@Override
public KernelFunction getObject()
{
try
{
return (KernelFunction) getMethod.invoke(targetObject);
}
catch (Exception ex)
{
}
return null;
}
@Override
public boolean setObject(KernelFunction val)
{
try
{
setMethod.invoke(targetObject, val);
return true;
}
catch (Exception ex)
{
}
return false;
}
};
}
else if (varClass.equals(DistanceMetric.class))
{
param = new MetricParameter()
{
private static final long serialVersionUID = -2823576782267398656L;
@Override
public DistanceMetric getMetric()
{
try
{
return (DistanceMetric) getMethod.invoke(targetObject);
}
catch (Exception ex)
{
}
return null;
}
@Override
public boolean setMetric(DistanceMetric val)
{
try
{
setMethod.invoke(targetObject, val);
return true;
}
catch (Exception ex)
{
}
return false;
}
};
}
else if (varClass.equals(DecayRate.class))
{
param = new DecayRateParameter()
{
private static final long serialVersionUID = 5348280386363008701L;
@Override
public DecayRate getObject()
{
try
{
return (DecayRate) getMethod.invoke(targetObject);
}
catch (Exception ex)
{
}
return null;
}
@Override
public boolean setObject(DecayRate obj)
{
try
{
setMethod.invoke(targetObject, obj);
return true;
}
catch (Exception ex)
{
}
return false;
}
@Override
public String getASCIIName()
{
return asciiName;
}
@Override
public String getName()
{
if (uniName == null)
return super.getName();
else
return uniName;
}
};
}
else if (varClass.isEnum())//We can create an ObjectParameter for enums
{
param = new ObjectParameter()
{
private static final long serialVersionUID = -6245401198404522216L;
@Override
public Object getObject()
{
try
{
return getMethod.invoke(targetObject);
}
catch (Exception ex)
{
}
return null;
}
@Override
public boolean setObject(Object val)
{
try
{
setMethod.invoke(targetObject, val);
return true;
}
catch (Exception ex)
{
}
return false;
}
@Override
public List parameterOptions()
{
return Collections.unmodifiableList(Arrays.asList(varClass.getEnumConstants()));
}
@Override
public String getASCIIName()
{
return asciiName;
}
@Override
public String getName()
{
if (uniName == null)
return super.getName();
else
return uniName;
}
};
}
return param;
}
/**
     * Returns a version of the same string that has spaces inserted before each
* capital letter
* @param in the CamelCase string
* @return the spaced Camel Case string
*/
private static String spaceCamelCase(String in)
{
StringBuilder sb = new StringBuilder(in.length()+5);
for(int i = 0; i < in.length(); i++)
{
char c = in.charAt(i);
if(Character.isUpperCase(c))
sb.append(' ');
sb.append(c);
}
return sb.toString().trim();
}
}
| 26,218 | 32.188608 | 180 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/Parameterized.java | package jsat.parameters;
import java.util.List;
/**
* An algorithm may be Parameterized, meaning it has one or more parameters that
 * can be tuned to alter the results of the algorithm in question.
*
* @author Edward Raff
*/
public interface Parameterized
{
/**
* Returns the list of parameters that can be altered for this learner.
* @return the list of parameters that can be altered for this learner.
*/
default public List<Parameter> getParameters()
{
return Parameter.getParamsFromMethods(this);
}
/**
* Returns the parameter with the given name. Two different strings may map
     * to a single Parameter object: an ASCII-only name and a Unicode style
     * name.
* @param paramName the name of the parameter to obtain
* @return the Parameter in question, or null if no such named Parameter exists.
*/
default public Parameter getParameter(String paramName)
{
return Parameter.toParameterMap(getParameters()).get(paramName);
}
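    /*
     * A hedged usage sketch (the parameter name below is hypothetical and
     * depends entirely on the implementing class):
     *
     *   Parameterized model = ...; //any learner implementing this interface
     *   for(Parameter p : model.getParameters())
     *       System.out.println(p.getASCIIName());
     *   Parameter lambda = model.getParameter("Lambda"); //hypothetical name
     *   if(lambda instanceof DoubleParameter)
     *       ((DoubleParameter) lambda).setValue(1e-3);
     */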
}
| 1,044 | 29.735294 | 85 | java |
JSAT | JSAT-master/JSAT/src/jsat/parameters/RandomSearch.java | /*
* Copyright (C) 2015 Edward Raff
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package jsat.parameters;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import jsat.DataSet;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.ClassificationModelEvaluation;
import jsat.classifiers.Classifier;
import jsat.distributions.Distribution;
import jsat.exceptions.FailedToFitException;
import jsat.regression.RegressionDataSet;
import jsat.regression.RegressionModelEvaluation;
import jsat.regression.Regressor;
import jsat.utils.FakeExecutor;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
import jsat.utils.random.XORWOW;
/**
* Random Search is a simple method for tuning the parameters of a
* classification or regression algorithm. Each parameter is given a
* distribution that represents the values of interest, and trials are done by
* randomly sampling each parameter from their respective distributions.
* Compared to {@link GridSearch} this method does better when lots of values
* are to be tested or when 2 or more parameters are to be evaluated. <br>
* The model it takes must implement the {@link Parameterized} interface. By
 * default, no parameters are selected for optimization. This is because
 * parameter value ranges are often algorithm specific. As such, the user must
* specify the parameters and the values to test using the <tt>addParameter</tt>
* methods.
*
 * See: Bergstra, J., & Bengio, Y. (2012). <i>Random Search for Hyper-Parameter Optimization</i>. Journal of Machine Learning Research, 13, 281–305.
* @author Edward Raff
*/
public class RandomSearch extends ModelSearch
{
private int trials = 25;
/**
* The matching list of distributions we will test.
*/
private List<Distribution> searchValues;
/**
     * Creates a new RandomSearch to tune the specified parameters of a regression
     * model. The parameters still need to be specified by calling
     * {@link #addParameter(jsat.parameters.DoubleParameter, jsat.distributions.Distribution) }
*
* @param baseRegressor the regressor to tune the parameters of
* @param folds the number of folds of cross-validation to perform to
* evaluate each combination of parameters
* @throws FailedToFitException if the base regressor does not implement
* {@link Parameterized}
*/
public RandomSearch(Regressor baseRegressor, int folds)
{
super(baseRegressor, folds);
searchValues = new ArrayList<Distribution>();
}
/**
     * Creates a new RandomSearch to tune the specified parameters of a
     * classification model. The parameters still need to be specified by
     * calling {@link #addParameter(jsat.parameters.DoubleParameter, jsat.distributions.Distribution) }
*
* @param baseClassifier the classifier to tune the parameters of
* @param folds the number of folds of cross-validation to perform to
* evaluate each combination of parameters
* @throws FailedToFitException if the base classifier does not implement
* {@link Parameterized}
*/
public RandomSearch(Classifier baseClassifier, int folds)
{
super(baseClassifier, folds);
searchValues = new ArrayList<Distribution>();
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public RandomSearch(RandomSearch toCopy)
{
super(toCopy);
this.trials = toCopy.trials;
this.searchValues = new ArrayList<Distribution>(toCopy.searchValues.size());
for (Distribution d : toCopy.searchValues)
this.searchValues.add(d.clone());
}
/**
* This method will automatically populate the search space with parameters
* based on which Parameter objects return non-null distributions.<br>
* <br>
* Note, using this method with Cross Validation has the potential for
* over-estimating the accuracy of results if the data set is actually used
     * for parameter guessing.<br>
* <br>
* It is possible for this method to return 0, indicating that no default
* parameters could be found. The intended interpretation is that there are
* no parameters that you <i>need</i> to tune to get good performance from
* the given model. Though there will be cases where the author has simply
* missed a class.
*
*
* @param data the data set to get parameter estimates from
* @return the number of parameters added
*/
public int autoAddParameters(DataSet data)
{
Parameterized obj;
if (baseClassifier != null)
obj = (Parameterized) baseClassifier;
else
obj = (Parameterized) baseRegressor;
int totalParms = 0;
for (Parameter param : obj.getParameters())
{
Distribution dist;
if (param instanceof DoubleParameter)
{
dist = ((DoubleParameter) param).getGuess(data);
if (dist != null)
{
addParameter((DoubleParameter) param, dist);
totalParms++;
}
}
else if (param instanceof IntParameter)
{
dist = ((IntParameter) param).getGuess(data);
if (dist != null)
{
addParameter((IntParameter) param, dist);
totalParms++;
}
}
}
return totalParms;
}
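    /*
     * A hedged usage sketch (the base classifier, fold and trial counts, and
     * the fallback parameter name and distribution are illustrative
     * assumptions, not a prescribed configuration):
     *
     *   ClassificationDataSet data = ...; //assumed to be loaded elsewhere
     *   Classifier base = ...;            //any classifier implementing Parameterized
     *   RandomSearch search = new RandomSearch(base, 5); //5-fold cross validation
     *   search.setTrials(50);
     *   if(search.autoAddParameters(data) == 0) //no guessable parameters found
     *       search.addParameter("SomeParameter", new LogUniform(1e-5, 1e2)); //hypothetical name
     *   search.train(data, true); //true => evaluate the trial models in parallel
     *   Classifier tuned = search.getTrainedClassifier(); //inherited from ModelSearch
     */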
/**
     * Sets the number of trials or samples that will be taken. This value is the number of models that will be trained and evaluated for their performance.
* @param trials the number of models to build and evaluate
*/
public void setTrials(int trials)
{
if(trials < 1)
throw new IllegalArgumentException("number of trials must be positive, not " + trials);
this.trials = trials;
}
/**
*
* @return the number of models that will be built to evaluate
*/
public int getTrials()
{
return trials;
}
/**
* Adds a new double parameter to be altered for the model being tuned.
*
* @param param the model parameter
     * @param dist the distribution to sample from for this parameter
*/
public void addParameter(DoubleParameter param, Distribution dist)
{
if (param == null)
throw new IllegalArgumentException("null not allowed for parameter");
searchParams.add(param);
searchValues.add(dist.clone());
}
/**
     * Adds a new integer parameter to be altered for the model being tuned.
*
* @param param the model parameter
     * @param dist the distribution to sample from for this parameter
*/
public void addParameter(IntParameter param, Distribution dist)
{
if (param == null)
throw new IllegalArgumentException("null not allowed for parameter");
searchParams.add(param);
searchValues.add(dist.clone());
}
/**
* Adds a new parameter to be altered for the model being tuned.
*
* @param name the name of the parameter
     * @param dist the distribution to sample values from for the specified parameter
*/
public void addParameter(String name, Distribution dist)
{
Parameter param = getParameterByName(name);
if(param instanceof DoubleParameter)
addParameter((DoubleParameter) param, dist);
else if(param instanceof IntParameter)
addParameter((IntParameter) param, dist);
else
throw new IllegalArgumentException("Parameter " + name + " is not for double or int values");
}
@Override
public void train(final ClassificationDataSet dataSet, final boolean parallel)
{
final PriorityQueue<ClassificationModelEvaluation> bestModels
= new PriorityQueue<>(folds, (ClassificationModelEvaluation t, ClassificationModelEvaluation t1) ->
{
double v0 = t.getScoreStats(classificationTargetScore).getMean();
double v1 = t1.getScoreStats(classificationTargetScore).getMean();
int order = classificationTargetScore.lowerIsBetter() ? 1 : -1;
return order * Double.compare(v0, v1);
});
/**
         * Each model is set to have a different combination of parameters. We
* then train each model to determine the best one.
*/
final List<Classifier> paramsToEval = new ArrayList<Classifier>();
Random rand = RandomUtil.getRandom();
for(int trial = 0; trial < trials; trial++)
{
for(int i = 0; i < searchParams.size(); i++)
{
double sampledValue = searchValues.get(i).invCdf(rand.nextDouble());
Parameter param = searchParams.get(i);
if(param instanceof DoubleParameter)
((DoubleParameter)param).setValue(sampledValue);
else if(param instanceof IntParameter)
((IntParameter)param).setValue((int) Math.round(sampledValue));
}
paramsToEval.add(baseClassifier.clone());
}
//if we are doing our CV splits ahead of time, get them done now
final List<ClassificationDataSet> preFolded;
/**
* Pre-combine our training combinations so that any caching can be
* re-used
*/
final List<ClassificationDataSet> trainCombinations;
if (reuseSameCVFolds)
{
preFolded = dataSet.cvSet(folds);
trainCombinations = new ArrayList<>(preFolded.size());
for (int i = 0; i < preFolded.size(); i++)
trainCombinations.add(ClassificationDataSet.comineAllBut(preFolded, i));
}
else
{
preFolded = null;
trainCombinations = null;
}
ParallelUtils.run(parallel && trainModelsInParallel, paramsToEval.size(), (indx)->
{
Classifier c = paramsToEval.get(indx);
ClassificationModelEvaluation cme = new ClassificationModelEvaluation(c, dataSet, !trainModelsInParallel && parallel);
cme.addScorer(classificationTargetScore.clone());
if (reuseSameCVFolds)
cme.evaluateCrossValidation(preFolded, trainCombinations);
else
cme.evaluateCrossValidation(folds);
synchronized (bestModels)
{
bestModels.add(cme);
}
});
Classifier bestClassifier = bestModels.peek().getClassifier();//Just re-train it on the whole set
if (trainFinalModel)
bestClassifier.train(dataSet, parallel);
trainedClassifier = bestClassifier;
}
@Override
public void train(final RegressionDataSet dataSet, final boolean parallel)
{
final PriorityQueue<RegressionModelEvaluation> bestModels
= new PriorityQueue<>(folds, (RegressionModelEvaluation t, RegressionModelEvaluation t1) ->
{
double v0 = t.getScoreStats(regressionTargetScore).getMean();
double v1 = t1.getScoreStats(regressionTargetScore).getMean();
int order = regressionTargetScore.lowerIsBetter() ? 1 : -1;
return order * Double.compare(v0, v1);
});
/**
         * Each model is set to have a different combination of parameters. We
* then train each model to determine the best one.
*/
final List<Regressor> paramsToEval = new ArrayList<>();
Random rand = RandomUtil.getRandom();
for(int trial = 0; trial < trials; trial++)
{
for(int i = 0; i < searchParams.size(); i++)
{
double sampledValue = searchValues.get(i).invCdf(rand.nextDouble());
Parameter param = searchParams.get(i);
if(param instanceof DoubleParameter)
((DoubleParameter)param).setValue(sampledValue);
else if(param instanceof IntParameter)
((IntParameter)param).setValue((int) Math.round(sampledValue));
}
paramsToEval.add(baseRegressor.clone());
}
//if we are doing our CV splits ahead of time, get them done now
final List<RegressionDataSet> preFolded;
/**
* Pre-combine our training combinations so that any caching can be
* re-used
*/
final List<RegressionDataSet> trainCombinations;
if (reuseSameCVFolds)
{
preFolded = dataSet.cvSet(folds);
trainCombinations = new ArrayList<>(preFolded.size());
for (int i = 0; i < preFolded.size(); i++)
trainCombinations.add(RegressionDataSet.comineAllBut(preFolded, i));
}
else
{
preFolded = null;
trainCombinations = null;
}
ParallelUtils.run(parallel && trainModelsInParallel, paramsToEval.size(), (indx)->
{
Regressor r = paramsToEval.get(indx);
RegressionModelEvaluation cme = new RegressionModelEvaluation(r, dataSet, !trainModelsInParallel && parallel);
cme.addScorer(regressionTargetScore.clone());
if (reuseSameCVFolds)
cme.evaluateCrossValidation(preFolded, trainCombinations);
else
cme.evaluateCrossValidation(folds);
synchronized (bestModels)
{
bestModels.add(cme);
}
});
Regressor bestRegressor = bestModels.peek().getRegressor();//Just re-train it on the whole set
if (trainFinalModel)
bestRegressor.train(dataSet, parallel);
trainedRegressor = bestRegressor;
}
@Override
public RandomSearch clone()
{
return new RandomSearch(this);
}
}
| 14,950 | 36.850633 | 155 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/AveragedRegressor.java | package jsat.regression;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.classifiers.DataPoint;
/**
* Creates a regressor that averages the results of several voting regression methods.
* Null values are not supported, and will cause errors at a later time. The averaged
* regressor can be trained, and will train each of its voting regressors. If each
* regressor is of the same type, training may not be advisable.
*
* @author Edward Raff
*/
public class AveragedRegressor implements Regressor
{
private static final long serialVersionUID = 8870461208829349608L;
/**
* The array of voting regressors
*/
protected Regressor[] voters;
/**
* Constructs a new averaged regressor using the given array of voters
* @param voters the array of voters to use
*/
public AveragedRegressor(Regressor... voters)
{
if(voters == null ||voters.length == 0)
throw new RuntimeException("No voters given for construction");
this.voters = voters;
}
/**
* Constructs a new averaged regressor using the given list of voters.
* The list of voters will be copied into a new space, so the list may
* safely be reused.
     * @param voters the list of voters to use
*/
public AveragedRegressor(List<Regressor> voters)
{
if(voters == null || voters.isEmpty())
throw new RuntimeException("No voters given for construction");
this.voters = voters.toArray(new Regressor[0]);
}
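    /*
     * A hedged usage sketch (the particular voters chosen here are
     * illustrative only):
     *
     *   RegressionDataSet data = ...; //assumed to be loaded elsewhere
     *   AveragedRegressor avg = new AveragedRegressor(
     *           new MultipleLinearRegression(),
     *           new KernelRidgeRegression());
     *   avg.train(data);
     *   double yHat = avg.regress(data.getDataPoint(0));
     */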
public double regress(DataPoint data)
{
double r = 0.0;
for(Regressor vote : voters)
r += vote.regress(data);
return r / voters.length;
}
public void train(RegressionDataSet dataSet, boolean parallel)
{
for(Regressor voter : voters)
voter.train(dataSet, parallel);
}
public void train(RegressionDataSet dataSet)
{
for(Regressor voter : voters)
voter.train(dataSet);
}
public boolean supportsWeightedData()
{
return false;
}
@Override
public AveragedRegressor clone()
{
Regressor[] clone = new Regressor[this.voters.length];
for(int i = 0; i < clone.length; i++)
clone[i] = voters[i].clone();
return new AveragedRegressor(clone);
}
}
| 2,378 | 27.662651 | 87 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/BaseUpdateableRegressor.java | package jsat.regression;
import java.util.Collections;
import java.util.concurrent.ExecutorService;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
/**
* A base implementation of the UpdateableRegressor.
* {@link #train(jsat.regression.RegressionDataSet, java.util.concurrent.ExecutorService) }
* will simply call
* {@link #train(jsat.regression.RegressionDataSet) }, which will call
* {@link #setUp(jsat.classifiers.CategoricalData[], int) } and then call
* {@link #update(jsat.classifiers.DataPoint, double) } for each data point in
* a random order.
*
* @author Edward Raff
*/
public abstract class BaseUpdateableRegressor implements UpdateableRegressor
{
private static final long serialVersionUID = -679467882721432240L;
private int epochs = 1;
/**
* Sets the number of whole iterations through the training set that will be
* performed for training
* @param epochs the number of whole iterations through the data set
*/
public void setEpochs(int epochs)
{
if(epochs < 1)
throw new IllegalArgumentException("epochs must be a positive value");
this.epochs = epochs;
}
/**
* Returns the number of epochs used for training
* @return the number of epochs used for training
*/
public int getEpochs()
{
return epochs;
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
train(dataSet);
}
@Override
public void train(RegressionDataSet dataSet)
{
trainEpochs(dataSet, this, epochs);
}
/**
     * Performs training on an updateable regressor by going over the whole
* data set in random order one observation at a time, multiple times.
*
* @param dataSet the data set to train from
     * @param toTrain the regressor to train
* @param epochs the number of passes through the data set
*/
public static void trainEpochs(RegressionDataSet dataSet, UpdateableRegressor toTrain, int epochs)
{
if(epochs < 1)
throw new IllegalArgumentException("epochs must be positive");
toTrain.setUp(dataSet.getCategories(), dataSet.getNumNumericalVars());
IntList randomOrder = new IntList(dataSet.size());
ListUtils.addRange(randomOrder, 0, dataSet.size(), 1);
for (int epoch = 0; epoch < epochs; epoch++)
{
Collections.shuffle(randomOrder);
for (int i : randomOrder)
toTrain.update(dataSet.getDataPoint(i), dataSet.getWeight(i), dataSet.getTargetValue(i));
}
}
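    /*
     * A hedged usage sketch (KernelRLS is used only as an example of an
     * UpdateableRegressor; its kernel and tolerance values are illustrative):
     *
     *   RegressionDataSet data = ...; //assumed to be loaded elsewhere
     *   UpdateableRegressor model = new KernelRLS(new RBFKernel(0.5), 1e-2);
     *   BaseUpdateableRegressor.trainEpochs(data, model, 5); //5 random-order passes
     */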
@Override
abstract public UpdateableRegressor clone();
}
| 2,687 | 30.623529 | 105 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/KernelRLS.java | package jsat.regression;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.distributions.kernels.KernelTrick;
import jsat.linear.*;
import jsat.parameters.Parameter.ParameterHolder;
import jsat.parameters.Parameterized;
import jsat.utils.DoubleList;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
/**
* Provides an implementation of the Kernel Recursive Least Squares algorithm.
 * This algorithm updates the model once per data point, and induces sparsity by
* projecting data points down onto a set of basis vectors learned from the data
* stream.
* <br><br>
 * See: Engel, Y., Mannor, S., & Meir, R. (2004). <i>The Kernel Recursive
* Least-Squares Algorithm</i>. IEEE Transactions on Signal Processing, 52(8),
* 2275–2285. doi:10.1109/TSP.2004.830985
*
* @author Edward Raff
*/
public class KernelRLS implements UpdateableRegressor, Parameterized
{
private static final long serialVersionUID = -7292074388953854317L;
@ParameterHolder
private KernelTrick k;
private double errorTolerance;
private List<Vec> vecs;
private List<Double> kernelAccel;
private Matrix K;
private Matrix InvK;
private Matrix P;
private Matrix KExpanded;
private Matrix InvKExpanded;
private Matrix PExpanded;
private double[] alphaExpanded;
/**
* Creates a new Kernel RLS learner
* @param k the kernel trick to use
* @param errorTolerance the tolerance for errors in the projection
*/
public KernelRLS(KernelTrick k, double errorTolerance)
{
this.k = k;
setErrorTolerance(errorTolerance);
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
protected KernelRLS(KernelRLS toCopy)
{
this.k = toCopy.k.clone();
this.errorTolerance = toCopy.errorTolerance;
if(toCopy.vecs != null)
{
this.vecs = new ArrayList<Vec>(toCopy.vecs.size());
for(Vec vec : toCopy.vecs)
this.vecs.add(vec.clone());
}
if(toCopy.KExpanded != null)
{
this.KExpanded = toCopy.KExpanded.clone();
this.K = new SubMatrix(KExpanded, 0, 0, vecs.size(), vecs.size());
}
if(toCopy.InvKExpanded != null)
{
this.InvKExpanded = toCopy.InvKExpanded.clone();
this.InvK = new SubMatrix(InvKExpanded, 0, 0, vecs.size(), vecs.size());
}
if(toCopy.PExpanded != null)
{
this.PExpanded = toCopy.PExpanded.clone();
this.P = new SubMatrix(PExpanded, 0, 0, vecs.size(), vecs.size());
}
if(toCopy.alphaExpanded != null)
this.alphaExpanded = Arrays.copyOf(toCopy.alphaExpanded, toCopy.alphaExpanded.length);
}
/**
* Sets the tolerance for errors in approximating a data point by projecting
* it onto the set of basis vectors. In general: as the tolerance increases
* the sparsity of the model increases but the accuracy may go down.
* <br>
* Values in the range 10<sup>x</sup> ∀ x ∈ {-1, -2, -3, -4}
* often work well for this algorithm.
*
* @param v the approximation tolerance
*/
public void setErrorTolerance(double v)
{
if(Double.isNaN(v) || Double.isInfinite(v) || v <= 0)
throw new IllegalArgumentException("The error tolerance must be a positive constant, not " + v);
this.errorTolerance = v;
}
/**
* Returns the projection approximation tolerance
* @return the projection approximation tolerance
*/
public double getErrorTolerance()
{
return errorTolerance;
}
/**
* Returns the number of basis vectors that make up the model
* @return the number of basis vectors that make up the model
*/
public int getModelSize()
{
if(vecs == null)
return 0;
return vecs.size();
}
/**
     * Finalizes the model. During online training, the Gram matrix and its
     * inverse must be stored to perform updates, at the cost of
     * O(n<sup>2</sup>) memory. Once training is completed, these matrices are no
* longer needed - and can be removed to reclaim memory by finalizing the
* model. Once finalized, the model can no longer be updated - unless reset
* (destroying the model) by calling
* {@link #setUp(jsat.classifiers.CategoricalData[], int) }
*/
public void finalizeModel()
{
        alphaExpanded = Arrays.copyOf(alphaExpanded, vecs.size());//don't need the extra space
K = KExpanded = InvK = InvKExpanded = P = PExpanded = null;
}
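    /*
     * A hedged usage sketch (the kernel and error tolerance are illustrative):
     *
     *   RegressionDataSet data = ...; //assumed to be loaded elsewhere
     *   KernelRLS krls = new KernelRLS(new RBFKernel(0.5), 1e-2);
     *   krls.train(data); //or setUp(...) once followed by update(...) per data point
     *   krls.finalizeModel(); //reclaim the O(n^2) matrices once updating is done
     *   double yHat = krls.regress(data.getDataPoint(0));
     */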
@Override
public double regress(DataPoint data)
{
final Vec y = data.getNumericalValues();
return k.evalSum(vecs, kernelAccel, alphaExpanded, y, 0, vecs.size());
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
train(dataSet);
}
@Override
public void train(RegressionDataSet dataSet)
{
setUp(dataSet.getCategories(), dataSet.getNumNumericalVars());
IntList randOrder = new IntList(dataSet.size());
ListUtils.addRange(randOrder, 0, dataSet.size(), 1);
for(int i : randOrder)
update(dataSet.getDataPoint(i), dataSet.getWeight(i), dataSet.getTargetValue(i));
}
@Override
public boolean supportsWeightedData()
{
return false;
}
@Override
public KernelRLS clone()
{
return new KernelRLS(this);
}
@Override
public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes)
{
vecs = new ArrayList<Vec>();
if(k.supportsAcceleration())
kernelAccel = new DoubleList();
else
kernelAccel = null;
K = null;
InvK = null;
P = null;
KExpanded = new DenseMatrix(100, 100);
InvKExpanded = new DenseMatrix(100, 100);
PExpanded = new DenseMatrix(100, 100);
alphaExpanded = new double[100];
}
@Override
public void update(DataPoint dataPoint, double weight, final double y_t)
{
/*
* TODO a lot of temporary allocations are done in this code, but
* potentially change size - investigate storing them as well.
*/
Vec x_t = dataPoint.getNumericalValues();
final List<Double> qi = k.getQueryInfo(x_t);
final double k_tt = k.eval(0, 0, Arrays.asList(x_t), qi);
if(K == null)//first point to be added
{
K = new SubMatrix(KExpanded, 0, 0, 1, 1);
K.set(0, 0, k_tt);
InvK = new SubMatrix(InvKExpanded, 0, 0, 1, 1);
InvK.set(0, 0, 1/k_tt);
P = new SubMatrix(PExpanded, 0, 0, 1, 1);
P.set(0, 0, 1);
alphaExpanded[0] = y_t/k_tt;
vecs.add(x_t);
if(kernelAccel != null)
kernelAccel.addAll(qi);
return;
}
//Normal case
DenseVector kxt = new DenseVector(K.rows());
for (int i = 0; i < kxt.length(); i++)
kxt.set(i, k.eval(i, x_t, qi, vecs, kernelAccel));
        //ALD (Approximate Linear Dependence) test
final Vec alphas_t = InvK.multiply(kxt);
final double delta_t = k_tt-alphas_t.dot(kxt);
final int size = K.rows();
final double alphaConst = kxt.dot(new DenseVector(alphaExpanded, 0, size));
if(delta_t > errorTolerance)//add to the dictionary
{
vecs.add(x_t);
if(kernelAccel != null)
kernelAccel.addAll(qi);
if(size == KExpanded.rows())//we need to grow first
{
KExpanded.changeSize(size*2, size*2);
InvKExpanded.changeSize(size*2, size*2);
PExpanded.changeSize(size*2, size*2);
alphaExpanded = Arrays.copyOf(alphaExpanded, size*2);
}
Matrix.OuterProductUpdate(InvK, alphas_t, alphas_t, 1/delta_t);
K = new SubMatrix(KExpanded, 0, 0, size+1, size+1);
InvK = new SubMatrix(InvKExpanded, 0, 0, size+1, size+1);
P = new SubMatrix(PExpanded, 0, 0, size+1, size+1);
//update bottom row and side columns
for(int i = 0; i < size; i++)
{
K.set(size, i, kxt.get(i));
K.set(i, size, kxt.get(i));
InvK.set(size, i, -alphas_t.get(i)/delta_t);
InvK.set(i, size, -alphas_t.get(i)/delta_t);
//P is zeros, no change
}
//update bottom right corner
K.set(size, size, k_tt);
InvK.set(size, size, 1/delta_t);
P.set(size, size, 1.0);
for(int i = 0; i < size; i++)
alphaExpanded[i] -= alphas_t.get(i)*(y_t-alphaConst)/delta_t;
alphaExpanded[size] = (y_t-alphaConst)/delta_t;
}
else//project onto dictionary
{
Vec q_t =P.multiply(alphas_t);
q_t.mutableDivide(1+alphas_t.dot(q_t));
Matrix.OuterProductUpdate(P, q_t, alphas_t.multiply(P), -1);
Vec InvKqt = InvK.multiply(q_t);
for(int i = 0; i < size; i++)
alphaExpanded[i] += InvKqt.get(i)*(y_t-alphaConst);
}
}
}
| 9,622 | 31.843003 | 108 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/KernelRidgeRegression.java | package jsat.regression;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import jsat.DataSet;
import jsat.classifiers.DataPoint;
import jsat.distributions.Distribution;
import jsat.distributions.LogUniform;
import jsat.distributions.kernels.KernelTrick;
import jsat.distributions.kernels.RBFKernel;
import jsat.linear.CholeskyDecomposition;
import jsat.linear.DenseMatrix;
import jsat.linear.Matrix;
import jsat.linear.Vec;
import jsat.parameters.Parameter.ParameterHolder;
import jsat.parameters.Parameterized;
import jsat.utils.concurrent.ParallelUtils;
/**
* A kernelized implementation of Ridge Regression. Ridge
* Regression is equivalent to {@link MultipleLinearRegression} with an added
* L<sub>2</sub> penalty for the weight vector. <br><br>
 * This algorithm is very expensive to compute, requiring O(n<sup>3</sup>) time, where n is the
* number of training points.
*
* @author Edward Raff
*/
public class KernelRidgeRegression implements Regressor, Parameterized
{
private static final long serialVersionUID = 6275333785663250072L;
private double lambda;
@ParameterHolder
private KernelTrick k;
private List<Vec> vecs;
private double[] alphas;
/**
* Creates a new Kernel Ridge Regression learner that uses an RBF kernel
*/
public KernelRidgeRegression()
{
this(1e-6, new RBFKernel());
}
/**
* Creates a new Kernel Ridge Regression learner
* @param lambda the regularization parameter
* @param kernel the kernel to use
* @see #setLambda(double)
*/
public KernelRidgeRegression(double lambda, KernelTrick kernel)
{
setLambda(lambda);
setKernel(kernel);
}
/**
* Copy Constructor
* @param toCopy the object to copy
*/
protected KernelRidgeRegression(KernelRidgeRegression toCopy)
{
this(toCopy.lambda, toCopy.getKernel().clone());
if(toCopy.alphas != null)
this.alphas = Arrays.copyOf(toCopy.alphas, toCopy.alphas.length);
if(toCopy.vecs != null)
this.vecs = new ArrayList<>(toCopy.vecs);
}
/**
* Guesses the distribution to use for the λ parameter
*
* @param d the dataset to get the guess for
* @return the guess for the λ parameter
*/
public static Distribution guessLambda(DataSet d)
{
return new LogUniform(1e-7, 1e-2);
}
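    /*
     * A hedged usage sketch (lambda and the kernel width are illustrative; in
     * practice they would be tuned, e.g. with RandomSearch using guessLambda):
     *
     *   RegressionDataSet data = ...; //assumed to be loaded elsewhere
     *   KernelRidgeRegression krr = new KernelRidgeRegression(1e-4, new RBFKernel(0.5));
     *   krr.train(data, true); //true => build the kernel matrix with multiple threads
     *   double yHat = krr.regress(data.getDataPoint(0));
     */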
/**
* Sets the regularization parameter used. The value of lambda depends on
* the data set and kernel used, with easier problems using smaller lambdas.
* @param lambda the positive regularization constant in (0, Inf)
*/
public void setLambda(double lambda)
{
if(Double.isNaN(lambda) || Double.isInfinite(lambda) || lambda <= 0)
throw new IllegalArgumentException("lambda must be a positive constant, not " + lambda);
this.lambda = lambda;
}
/**
* Returns the regularization constant in use
* @return the regularization constant in use
*/
public double getLambda()
{
return lambda;
}
/**
* Sets the kernel trick to use
* @param k the kernel to use
*/
public void setKernel(KernelTrick k)
{
this.k = k;
}
/**
* Returns the kernel in use
* @return the kernel in use
*/
public KernelTrick getKernel()
{
return k;
}
@Override
public double regress(DataPoint data)
{
Vec x = data.getNumericalValues();
double score = 0;
for(int i = 0; i < alphas.length; i++)
score += alphas[i] * k.eval(vecs.get(i), x);
return score;
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
final int N = dataSet.size();
vecs = new ArrayList<>(N);
        //alphas initialized later
Vec Y = dataSet.getTargetValues();
for(int i = 0; i < N; i++)
vecs.add(dataSet.getDataPoint(i).getNumericalValues());
final Matrix K = new DenseMatrix(N, N);
ParallelUtils.run(parallel, N, (i)->
{
K.set(i, i, k.eval(vecs.get(i), vecs.get(i)) + lambda);//diagonal values
for (int j = i + 1; j < N; j++)
{
double K_ij = k.eval(vecs.get(i), vecs.get(j));
K.set(i, j, K_ij);
K.set(j, i, K_ij);
}
});
CholeskyDecomposition cd;
if(parallel)
cd = new CholeskyDecomposition(K, ParallelUtils.CACHED_THREAD_POOL);
else
cd = new CholeskyDecomposition(K);
Vec alphaTmp = cd.solve(Y);
alphas = alphaTmp.arrayCopy();
}
@Override
public boolean supportsWeightedData()
{
return false;
}
@Override
public KernelRidgeRegression clone()
{
return new KernelRidgeRegression(this);
}
}
| 4,941 | 27.24 | 100 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/MultipleLinearRegression.java |
package jsat.regression;
import jsat.SingleWeightVectorModel;
import jsat.classifiers.DataPoint;
import jsat.classifiers.DataPointPair;
import jsat.linear.DenseMatrix;
import jsat.linear.DenseVector;
import jsat.linear.Matrix;
import jsat.linear.QRDecomposition;
import jsat.linear.Vec;
import jsat.utils.concurrent.ParallelUtils;
/**
*
* @author Edward Raff
*/
public class MultipleLinearRegression implements Regressor, SingleWeightVectorModel
{
private static final long serialVersionUID = 7694194181910565061L;
/**
* The vector B such that Y = X * B is the least squares solution. Will be stored as Y = X * B + a
*/
private Vec B;
/**
* The offset value that is not multiplied by any variable
*/
private double a;
private boolean useWeights = false;
public MultipleLinearRegression()
{
this(true);
}
public MultipleLinearRegression(boolean useWeights)
{
this.useWeights = useWeights;
}
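    /*
     * A hedged usage sketch; the data set is assumed to contain only numeric
     * features, since this class does not support categorical values:
     *
     *   RegressionDataSet data = ...; //numeric features only
     *   MultipleLinearRegression mlr = new MultipleLinearRegression();
     *   mlr.train(data);
     *   Vec weights = mlr.getRawWeight(); //B in y = B . x + a
     *   double bias = mlr.getBias();      //a
     *   double yHat = mlr.regress(data.getDataPoint(0));
     */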
@Override
public double regress(DataPoint data)
{
return B.dot(data.getNumericalValues())+a;
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
if(dataSet.getNumCategoricalVars() > 0)
throw new RuntimeException("Multiple Linear Regression only works with numerical values");
DenseMatrix X = new DenseMatrix(dataSet.size(), dataSet.getNumNumericalVars()+1);
DenseVector Y = new DenseVector(dataSet.size());
//Construct matrix and vector, Y = X * B, we will solve for B or its least squares solution
for(int i = 0; i < dataSet.size(); i++)
{
DataPointPair<Double> dpp = dataSet.getDataPointPair(i);
Y.set(i, dpp.getPair());
X.set(i, 0, 1.0);//First column is all ones
Vec vals = dpp.getVector();
for(int j = 0; j < vals.length(); j++)
X.set(i, j+1, vals.get(j));
}
if(useWeights)
{
            //The sqrt(weight) vector can be applied to X and Y, and then QR can proceed as normal
Vec weights = new DenseVector(dataSet.size());
for(int i = 0; i < dataSet.size(); i++)
weights.set(i, Math.sqrt(dataSet.getWeight(i)));
Matrix.diagMult(weights, X);
Y.mutablePairwiseMultiply(weights);
}
Matrix[] QR = parallel ? X.qr(ParallelUtils.CACHED_THREAD_POOL) : X.qr();
QRDecomposition qrDecomp = new QRDecomposition(QR[0], QR[1]);
Vec tmp = qrDecomp.solve(Y);
a = tmp.get(0);
B = new DenseVector(dataSet.getNumNumericalVars());
for(int i = 1; i < tmp.length(); i++)
B.set(i-1, tmp.get(i));
}
@Override
public boolean supportsWeightedData()
{
return useWeights;
}
@Override
public Vec getRawWeight()
{
return B;
}
@Override
public double getBias()
{
return a;
}
@Override
public Vec getRawWeight(int index)
{
if(index < 1)
return getRawWeight();
else
throw new IndexOutOfBoundsException("Model has only 1 weight vector");
}
@Override
public double getBias(int index)
{
if (index < 1)
return getBias();
else
throw new IndexOutOfBoundsException("Model has only 1 weight vector");
}
@Override
public int numWeightsVecs()
{
return 1;
}
@Override
public MultipleLinearRegression clone()
{
MultipleLinearRegression copy = new MultipleLinearRegression();
if(B != null)
copy.B = this.B.clone();
copy.a = this.a;
return copy;
}
}
| 3,876 | 25.37415 | 102 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/NadarayaWatson.java |
package jsat.regression;
import java.util.*;
import jsat.classifiers.DataPoint;
import jsat.classifiers.bayesian.BestClassDistribution;
import jsat.distributions.multivariate.MultivariateKDE;
import jsat.linear.Vec;
import jsat.linear.VecPaired;
import jsat.parameters.Parameter.ParameterHolder;
import jsat.parameters.Parameterized;
/**
* The Nadaraya-Watson regressor uses the {@link MultivariateKDE Kernel Density Estimator } to perform regression on a data set. <br>
* Nadaraya-Watson can also be expressed as a classifier, and equivalent results can be obtained by combining a KDE with {@link BestClassDistribution}.
*
* @author Edward Raff
*/
public class NadarayaWatson implements Regressor, Parameterized
{
private static final long serialVersionUID = 8632599345930394763L;
@ParameterHolder
private MultivariateKDE kde;
public NadarayaWatson(MultivariateKDE kde)
{
this.kde = kde;
}
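    /*
     * A hedged usage sketch (MetricKDE is assumed here as one available
     * MultivariateKDE implementation; the choice of KDE is illustrative):
     *
     *   RegressionDataSet data = ...; //assumed to be loaded elsewhere
     *   NadarayaWatson nw = new NadarayaWatson(new MetricKDE());
     *   nw.train(data);
     *   double yHat = nw.regress(data.getDataPoint(0));
     */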
@Override
public double regress(DataPoint data)
{
List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> nearBy = kde.getNearby(data.getNumericalValues());
if(nearBy.isEmpty())
            return 0;//hmmm... what should be returned in this case?
double weightSum = 0;
double sum = 0;
for(VecPaired<VecPaired<Vec, Integer>, Double> v : nearBy)
{
double weight = v.getPair();
double regressionValue = ( (VecPaired<Vec, Double>) v.getVector().getVector()).getPair();
weightSum += weight;
sum += weight*regressionValue;
}
return sum / weightSum;
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
List<VecPaired<Vec, Double>> vectors = collectVectors(dataSet);
kde.setUsingData(vectors, parallel);
}
private List<VecPaired<Vec, Double>> collectVectors(RegressionDataSet dataSet)
{
List<VecPaired<Vec, Double>> vectors = new ArrayList<>(dataSet.size());
for(int i = 0; i < dataSet.size(); i++)
vectors.add(new VecPaired<>(dataSet.getDataPoint(i).getNumericalValues(), dataSet.getTargetValue(i)));
return vectors;
}
@Override
public boolean supportsWeightedData()
{
return true;
}
@Override
public NadarayaWatson clone()
{
return new NadarayaWatson((MultivariateKDE)kde.clone());
}
}
| 2,436 | 29.848101 | 152 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/OrdinaryKriging.java | package jsat.regression;
import static java.lang.Math.pow;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.classifiers.DataPoint;
import static jsat.linear.DenseVector.toDenseVec;
import jsat.linear.*;
import jsat.parameters.*;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.ParallelUtils;
/**
* An implementation of Ordinary Kriging with support for a uniform error
* measurement. When an {@link #getMeasurementError() error} value is applied, Kriging
* becomes equivalent to Gaussian Processes Regression.
*
* @author Edward Raff
*/
public class OrdinaryKriging implements Regressor, Parameterized
{
private static final long serialVersionUID = -5774553215322383751L;
private Variogram vari;
/**
* The weight values for each data point
*/
private Vec X;
private RegressionDataSet dataSet;
private double errorSqrd;
private double nugget;
/**
* The default nugget value is {@value #DEFAULT_NUGGET}
*/
public static final double DEFAULT_NUGGET = 0.1;
/**
* The default error value is {@link #DEFAULT_ERROR}
*/
public static final double DEFAULT_ERROR = 0.1;
/**
* Creates a new Ordinary Kriging.
*
* @param vari the variogram to fit to the data
* @param error the global measurement error
* @param nugget the nugget value to add to the variogram
*/
public OrdinaryKriging(Variogram vari, double error, double nugget)
{
this.vari = vari;
setMeasurementError(error);
this.nugget = nugget;
}
/**
* Creates a new Ordinary Kriging
* @param vari the variogram to fit to the data
* @param error the global measurement error
*/
public OrdinaryKriging(Variogram vari, double error)
{
this(vari, error, DEFAULT_NUGGET);
}
/**
* Creates a new Ordinary Kriging with a small error value
* @param vari the variogram to fit to the data
*/
public OrdinaryKriging(Variogram vari)
{
this(vari, DEFAULT_ERROR);
}
/**
* Creates a new Ordinary Kriging with a small error value using the
* {@link PowVariogram power} variogram.
*/
public OrdinaryKriging()
{
this(new PowVariogram());
}
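    /*
     * A hedged usage sketch (the variogram, measurement error, and nugget
     * values are illustrative defaults rather than tuned settings):
     *
     *   RegressionDataSet data = ...; //assumed to be loaded elsewhere
     *   OrdinaryKriging ok = new OrdinaryKriging(new PowVariogram(), 0.1, 0.1);
     *   ok.train(data);
     *   double yHat = ok.regress(data.getDataPoint(0));
     */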
@Override
public double regress(DataPoint data)
{
Vec x = data.getNumericalValues();
int npt = X.length()-1;
double[] distVals = new double[npt+1];
for (int i = 0; i < npt; i++)
distVals[i] = vari.val(x.pNormDist(2, dataSet.getDataPoint(i).getNumericalValues()));
distVals[npt] = 1.0;
return X.dot(toDenseVec(distVals));
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
this.dataSet = dataSet;
/**
* Size of the data set
*/
int N = dataSet.size();
/**
* Stores the target values
*/
Vec Y = new DenseVector(N+1);
Matrix V = new DenseMatrix(N+1, N+1);
vari.train(dataSet, nugget);
setUpVectorMatrix(N, dataSet, V, Y, parallel);
for(int i = 0; i < N; i++)
V.increment(i, i, -errorSqrd);
LUPDecomposition lup;
if(parallel)
lup = new LUPDecomposition(V, ParallelUtils.CACHED_THREAD_POOL);
else
lup = new LUPDecomposition(V);
X = lup.solve(Y);
if(Double.isNaN(lup.det()) || Math.abs(lup.det()) < 1e-5)
{
SingularValueDecomposition svd = new SingularValueDecomposition(V);
X = svd.solve(Y);
}
}
private void setUpVectorMatrix(final int N, final RegressionDataSet dataSet, final Matrix V, final Vec Y, boolean parallel)
{
ParallelUtils.run(parallel, N, (i)->
{
DataPoint dpi = dataSet.getDataPoint(i);
Vec xi = dpi.getNumericalValues();
for (int j = 0; j < N; j++)
{
Vec xj = dataSet.getDataPoint(j).getNumericalValues();
double val = vari.val(xi.pNormDist(2, xj));
V.set(i, j, val);
V.set(j, i, val);
}
V.set(i, N, 1.0);
V.set(N, i, 1.0);
Y.set(i, dataSet.getTargetValue(i));
});
V.set(N, N, 0);
}
@Override
public boolean supportsWeightedData()
{
return false;
}
@Override
public OrdinaryKriging clone()
{
OrdinaryKriging clone = new OrdinaryKriging(vari.clone());
clone.setMeasurementError(getMeasurementError());
clone.setNugget(getNugget());
if(this.X != null)
clone.X = this.X.clone();
if(this.dataSet != null)
clone.dataSet = this.dataSet;
return clone;
}
/**
* Sets the measurement error used for Kriging, which is equivalent to
* altering the diagonal values of the covariance. While the measurement
* errors could be per data point, this implementation provides only a
* global error. If the error is set to zero, it will perfectly interpolate
* all data points. <br>
* Increasing the error smooths the interpolation, and has a large impact on
* the regression results.
*
* @param error the measurement error for all data points
*/
public void setMeasurementError(double error)
{
this.errorSqrd = error*error;
}
/**
* Returns the measurement error used for Kriging, which is equivalent to
* altering the diagonal values of the covariance. While the measurement
* errors could be per data point, this implementation provides only a
* global error. If the error is set to zero, it will perfectly interpolate
* all data points.
*
* @return the global error used for the data
*/
public double getMeasurementError()
{
return Math.sqrt(errorSqrd);
}
/**
* Sets the nugget value passed to the variogram during training. The
* nugget allows the variogram to start from a non-zero value, and is
     * equivalent to altering the off diagonal values of the covariance. <br>
* Altering the nugget value has only a minor impact on the output
*
* @param nugget the new nugget value
* @throws ArithmeticException if a negative nugget value is provided
*/
public void setNugget(double nugget)
{
if(nugget < 0 || Double.isNaN(nugget) || Double.isInfinite(nugget))
throw new ArithmeticException("Nugget must be a positive value");
this.nugget = nugget;
}
/**
* Returns the nugget value passed to the variogram during training. The
* nugget allows the variogram to start from a non-zero value, and is
     * equivalent to altering the off diagonal values of the covariance.
*
* @return the nugget added to the variogram
*/
public double getNugget()
{
return nugget;
}
public static interface Variogram extends Cloneable
{
/**
* Sets the values of the variogram
* @param dataSet the data set to learn the parameters from
* @param nugget the nugget value to add tot he variogram, may be
* ignored if the variogram want to fit it automatically
*/
public void train(RegressionDataSet dataSet, double nugget);
/**
* Returns the output of the variogram for the given input
* @param r the input value
* @return the output of the variogram
*/
public double val(double r);
public Variogram clone();
}
public static class PowVariogram implements Variogram
{
private double alpha;
private double beta;
public PowVariogram()
{
this(1.5);
}
public PowVariogram(double beta)
{
this.beta = beta;
}
@Override
public void train(RegressionDataSet dataSet, double nugget)
{
int npt=dataSet.size();
double num=0,denom=0, nugSqrd = nugget*nugget;
for (int i = 0; i < npt; i++)
{
Vec xi = dataSet.getDataPoint(i).getNumericalValues();
double yi = dataSet.getTargetValue(i);
for (int j = i + 1; j < npt; j++)
{
Vec xj = dataSet.getDataPoint(j).getNumericalValues();
double yj = dataSet.getTargetValue(j);
double rb = pow(xi.pNormDist(2, xj), beta);
num += rb* (0.5* pow(yi-yj, 2)-nugSqrd);
denom += rb*rb;
}
}
alpha = num / denom;
}
@Override
public double val(double r)
{
return alpha*pow(r, beta);
}
@Override
public Variogram clone()
{
PowVariogram clone = new PowVariogram(beta);
clone.alpha = this.alpha;
return clone;
}
}
}
| 9,362 | 29.203226 | 127 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/RANSAC.java | package jsat.regression;
import java.util.*;
import java.util.concurrent.*;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.classifiers.DataPoint;
import jsat.classifiers.DataPointPair;
import jsat.exceptions.FailedToFitException;
import jsat.parameters.*;
import jsat.parameters.Parameter.ParameterHolder;
import jsat.utils.FakeExecutor;
import jsat.utils.IntSet;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* RANSAC is a randomized meta algorithm that is useful for fitting a model to a
 * data set that has a large number of outliers that do not represent the true
 * distribution well. <br>
 * RANSAC has the concept of inliers and outliers. An initial number of seed
 * points is specified. This makes the initial inlier set. The algorithm then
 * iterates several times, randomly selecting the specified number of seed points
 * and fitting a model to them. All remaining points are then tested against this
 * model, and every point within a specified absolute error is added to the set
 * of inliers. The model is then trained again on the
* larger set, and the training error becomes the measure of the strength of the
* model. The model that has the lowest error is then the fit model.
*
* @author Edward Raff
*/
public class RANSAC implements Regressor, Parameterized
{
private static final long serialVersionUID = -5015748604828907703L;
/**
* the minimum number of data required to fit the model
*/
private int initialTrainSize;
/**
* the number of iterations performed by the algorithm
*/
private int iterations;
/**
* a threshold value for determining when a datum fits a model
*/
private double maxPointError;
/**
* the number of close data values required to assert that a model fits well to data
*/
private int minResultSize;
@ParameterHolder
private Regressor baseRegressor;
/**
* True marks that the data point is part of the consensus set.
* False indicates it is not.
*/
private boolean[] consensusSet;
private double modelError;
/**
* Creates a new RANSAC training object. Because RANSAC is sensitive to
* parameter settings, which are data and model dependent, no default values
* exist for them.
*
* @param baseRegressor the model to fit using RANSAC
* @param iterations the number of iterations of the algorithm to perform
* @param initialTrainSize the number of points to seed each iteration of
* training with
* @param minResultSize the minimum number of inliers to make it into the
* model to be considered a possible fit.
* @param maxPointError the maximum allowed absolute difference in the
* output of the model and the true value for the data point to be added to
* the inlier set.
*/
public RANSAC(Regressor baseRegressor, int iterations, int initialTrainSize, int minResultSize, double maxPointError)
{
setInitialTrainSize(initialTrainSize);
setIterations(iterations);
setMaxPointError(maxPointError);
setMinResultSize(minResultSize);
this.baseRegressor = baseRegressor;
}
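    /*
     * A hedged usage sketch (the base regressor and all four settings are
     * illustrative; they are data and model dependent and have no good defaults):
     *
     *   RegressionDataSet data = ...; //assumed to be loaded elsewhere, with outliers
     *   RANSAC ransac = new RANSAC(new MultipleLinearRegression(),
     *           100,  //iterations
     *           20,   //seed points per iteration
     *           50,   //minimum consensus-set size to accept a model
     *           1.0); //maximum absolute error for a point to be an inlier
     *   ransac.train(data, true); //true => run iterations across multiple threads
     *   double yHat = ransac.regress(data.getDataPoint(0));
     */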
/**
* class that does the loop iteration work and returns a reference to
* itself. The are sortable based on the lowest error
*/
private class RANSACWorker implements Callable<RANSACWorker>, Comparable<RANSACWorker>
{
int maxIterations;
RegressionDataSet dataset;
Random rand;
Regressor baseModel;
public RANSACWorker(Regressor baseModel, int maxIterations, RegressionDataSet dataset)
{
this.baseModel = baseModel;
this.maxIterations = maxIterations;
this.dataset = dataset;
rand = RandomUtil.getRandom();
}
//To be determined
Regressor bestModel = null;
boolean[] bestConsensusSet = null;
double bestError = Double.POSITIVE_INFINITY;
@Override
public RANSACWorker call() throws Exception
{
bestConsensusSet = new boolean[dataset.size()];
boolean[] working_set = new boolean[dataset.size()];
Set<Integer> maybe_inliers = new IntSet(initialTrainSize*2);
for(int iter = 0; iter < maxIterations; iter++)
{
//Create sub data set sample
maybe_inliers.clear();
Arrays.fill(working_set, false);
while(maybe_inliers.size() < initialTrainSize)
maybe_inliers.add(rand.nextInt(working_set.length));
int consensusSize = maybe_inliers.size();
RegressionDataSet subDataSet = new RegressionDataSet(dataset.getNumNumericalVars(), dataset.getCategories());
for(int i : maybe_inliers)
{
subDataSet.addDataPointPair(dataset.getDataPointPair(i));
working_set[i] = true;
}
Regressor maybeModel = baseModel.clone();
maybeModel.train(subDataSet);
//Build consensus set
for(int i = 0; i < working_set.length; i++)
{
if(working_set[i])
continue;//Already part of the model
DataPointPair<Double> dpp = dataset.getDataPointPair(i);
double guess = maybeModel.regress(dpp.getDataPoint());
double diff = Math.abs(guess - dpp.getPair());
if(diff < maxPointError)
{
                        working_set[i] = true;//Add to the consensus set
subDataSet.addDataPointPair(dpp);
consensusSize++;
}
}
if(consensusSize < minResultSize )
continue;//We did not fit enough points to be considered
//Build final model
maybeModel.train(subDataSet);
                //Compute final model error on the consensus set
double thisError = 0;
for(int i = 0; i < working_set.length; i++)
{
if(!working_set[i])
continue;
DataPointPair<Double> dpp = dataset.getDataPointPair(i);
double guess = maybeModel.regress(dpp.getDataPoint());
double diff = Math.abs(guess - dpp.getPair());
thisError += diff;
}
if(thisError < bestError)//New best model
{
bestError = thisError;
bestModel = maybeModel;
System.arraycopy(working_set, 0, bestConsensusSet, 0, working_set.length);
}
}
return this;
}
@Override
public int compareTo(RANSACWorker o)
{
return Double.compare(this.bestError, o.bestError);
}
}
/**
* Returns the number of data points to be sampled from the training set to
* create initial models.
*
* @return the number of data points used to first create models
*/
public int getInitialTrainSize()
{
return initialTrainSize;
}
/**
* Sets the number of data points to be sampled from the training set to
* create initial models.
*
* @param initialTrainSize the number of data points to use to create models
*/
public void setInitialTrainSize(int initialTrainSize)
{
if(initialTrainSize < 1)
throw new RuntimeException("Can not train on an empty data set");
this.initialTrainSize = initialTrainSize;
}
/**
     * Returns the number of models that will be tested on the data set.
*
* @return the number of algorithm iterations
*/
public int getIterations()
{
return iterations;
}
/**
     * Sets the number of models that will be tested on the data set.
* @param iterations the number of iterations to perform
*/
public void setIterations(int iterations)
{
if(iterations < 1)
throw new RuntimeException("Must perform a positive number of iterations");
this.iterations = iterations;
}
/**
* Each data point not in the initial training set will be tested against
* the model. If a data point's error is sufficiently small, it will be added
* to the set of inliers.
*
* @return the maximum error any one point may have to be considered an inlier
*/
public double getMaxPointError()
{
return maxPointError;
}
/**
* Each data point not in the initial training set will be tested against
* the model. If a data point's error is sufficiently small, it will be added to the set
* of inliers.
*
* @param maxPointError the new maximum error a data point may have to be
* considered an inlier.
*/
public void setMaxPointError(double maxPointError)
{
if(maxPointError < 0 || Double.isInfinite(maxPointError) || Double.isNaN(maxPointError))
throw new ArithmeticException("The error must be a positive value, not " + maxPointError );
this.maxPointError = maxPointError;
}
/**
* RANSAC requires an initial model to be accurate enough to include a
* minimum number of inliers before being considered as a potentially good
* model. This is the number of points that must make it into the inlier set
* for a model to be considered.
*
* @return the minimum number of inliers to be considered
*/
public int getMinResultSize()
{
return minResultSize;
}
/**
* RANSAC requires an initial model to be accurate enough to include a
* minimum number of inliers before being considered as a potentially good
* model. This is the number of points that must make it into the inlier set
* for a model to be considered.
*
* @param minResultSize the minimum number of inliers to be considered
*/
public void setMinResultSize(int minResultSize)
{
if(minResultSize < getInitialTrainSize())
throw new RuntimeException("The min result size must be larger than the intial train size");
this.minResultSize = minResultSize;
}
@Override
public double regress(DataPoint data)
{
return baseRegressor.regress(data);
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
try
{
int workSize = iterations/SystemInfo.LogicalCores;
int leftOver = iterations%SystemInfo.LogicalCores;
List<Future<RANSACWorker>> futures = new ArrayList<>(SystemInfo.LogicalCores+1);
ExecutorService threadPool = parallel ? ParallelUtils.CACHED_THREAD_POOL : new FakeExecutor();
if(leftOver != 0)
futures.add(threadPool.submit(new RANSACWorker(baseRegressor, leftOver, dataSet)));
for(int i = 0; i < SystemInfo.LogicalCores; i++)
futures.add(threadPool.submit(new RANSACWorker(baseRegressor, workSize, dataSet)));
PriorityQueue<RANSACWorker> results = new PriorityQueue<>(SystemInfo.LogicalCores+1);
for( Future<RANSACWorker> futureWorker : futures )
results.add(futureWorker.get());
RANSACWorker bestResult = results.peek();
modelError = bestResult.bestError;
if(Double.isInfinite(modelError))
throw new FailedToFitException("Model could not be fit, inlier set never reached minimum size");
baseRegressor = bestResult.bestModel;
consensusSet = bestResult.bestConsensusSet;
}
catch (InterruptedException | ExecutionException ex)
{
Logger.getLogger(RANSAC.class.getName()).log(Level.SEVERE, null, ex);
throw new FailedToFitException(ex);
}
}
@Override
public boolean supportsWeightedData()
{
return baseRegressor.supportsWeightedData();
}
@Override
public RANSAC clone()
{
RANSAC clone = new RANSAC(baseRegressor.clone(), iterations, initialTrainSize, minResultSize, maxPointError);
return clone;
}
/**
* Once RANSAC is complete, it maintains its trained version of the
* finalized regressor. A clone of it may be retrieved from this method.
* @return a clone of the learned regressor
*/
public Regressor getBaseRegressorClone()
{
return baseRegressor.clone();
}
/**
* Returns a boolean array where the indices correspond to data points in
* the original training set. <tt>true</tt> indicates that the data point
* was a part of the final consensus set. <tt>false</tt> indicates that it
* was not.
*
* @return a boolean array indicating which points made it into the
* consensus set
*/
public boolean[] getConsensusSet()
{
return Arrays.copyOf(consensusSet, consensusSet.length);
}
/**
* Returns the model error, which is the average absolute difference between
* the model and all points in the set of inliers.
*
* @return the error for the learned model. Returns
* {@link Double#POSITIVE_INFINITY} if the model has not been trained or
* failed to fit.
*/
public double getModelError()
{
return modelError;
}
}
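/*
 * A minimal usage sketch added for illustration; it is not part of the original
 * source. It assumes a RegressionDataSet named "data" already exists and uses
 * MultipleLinearRegression as the base model purely as an example. The parameter
 * values (100 iterations, 20 initial points, 50 minimum inliers, max point error
 * 1.5) are arbitrary choices, not recommended defaults.
 *
 * RANSAC ransac = new RANSAC(new MultipleLinearRegression(), 100, 20, 50, 1.5);
 * ransac.train(data);
 * boolean[] inliers = ransac.getConsensusSet(); //which points were kept as inliers
 * double fitError = ransac.getModelError();     //average absolute error on the inliers
 * double prediction = ransac.regress(data.getDataPoint(0));
 */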
| 13,942 | 34.659847 | 125 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/RegressionDataSet.java |
package jsat.regression;
import java.util.*;
import jsat.DataSet;
import jsat.DataStore;
import jsat.RowMajorStore;
import jsat.classifiers.*;
import jsat.linear.DenseVector;
import jsat.linear.IndexValue;
import jsat.linear.SparseVector;
import jsat.linear.Vec;
import jsat.utils.DoubleList;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
/**
* A RegressionDataSet is a data set specifically for the task of performing regression.
* Each data point is paired with a double value that indicates its true regression value.
* An example of a regression problem would be mapping the inputs of a function to its
* outputs, and attempting to learn the function from the samples.
*
* @author Edward Raff
*/
public class RegressionDataSet extends DataSet<RegressionDataSet>
{
protected DoubleList targets;
/**
* Creates a new empty data set for regression
*
* @param numerical the number of numerical attributes that will be used, excluding the regression value
* @param categories an array of length equal to the number of categorical attributes, each object describing the attribute in question
*/
public RegressionDataSet(int numerical, CategoricalData[] categories)
{
super(numerical, categories);
targets = new DoubleList();
}
/**
* Creates a new dataset containing the given points paired with their
* target values. Pairing is determined by the iteration order of each
* collection.
*
* @param datapoints the DataStore that will back this Data Set
* @param targets the target values to use
*/
public RegressionDataSet(DataStore datapoints, List<Double> targets)
{
super(datapoints);
this.targets = new DoubleList(targets);
}
/**
* Creates a new data set for the given list of data points. The data
* points will be copied, changes in one will not effect the other.
*
* @param data the list of data point to create a data set from
* @param predicting which of the numerical attributes is the
* regression target. Categorical attributes are ignored in
* the count of attributes for this value.
*/
public RegressionDataSet(List<DataPoint> data, int predicting)
{
super(data.get(0).numNumericalValues()-1, data.get(0).getCategoricalData());
//Use the first data point to set up
DataPoint tmp = data.get(0);
categories = new CategoricalData[tmp.numCategoricalValues()];
System.arraycopy(tmp.getCategoricalData(), 0, categories, 0, categories.length);
targets = new DoubleList(data.size());
//Fill up data
for(DataPoint dp : data)
{
Vec origV = dp.getNumericalValues();
Vec newVec;
double target = 0;//init to zero to implicitly handle sparse feature vector case
if (origV.isSparse())
newVec = new SparseVector(origV.length() - 1, origV.nnz());
else
newVec = new DenseVector(origV.length() - 1);
for (IndexValue iv : origV)
if (iv.getIndex() < predicting)
newVec.set(iv.getIndex(), iv.getValue());
else if (iv.getIndex() == predicting)
target = iv.getValue();
else//iv.getIndex() > index
newVec.set(iv.getIndex() - 1, iv.getValue());
DataPoint newDp = new DataPoint(newVec, dp.getCategoricalValues(), categories);
datapoints.addDataPoint(newDp);
targets.add(target);
}
}
/**
* Creates a new regression data set by copying all the data points
* in the given list. Alterations to this list will not affect this DataSet.
* @param list source of data points to copy
*/
public RegressionDataSet(List<DataPointPair<Double>> list)
{
super(list.get(0).getDataPoint().numNumericalValues(), CategoricalData.copyOf(list.get(0).getDataPoint().getCategoricalData()));
this.datapoints = new RowMajorStore(numNumerVals, categories);
this.targets = new DoubleList();
for(DataPointPair<Double> dpp : list)
{
datapoints.addDataPoint(dpp.getDataPoint());
targets.add(dpp.getPair());
}
}
private RegressionDataSet()
{
super(new RowMajorStore(1, new CategoricalData[0]));
}
public static RegressionDataSet comineAllBut(List<RegressionDataSet> list, int exception)
{
int numer = list.get(exception).getNumNumericalVars();
CategoricalData[] categories = list.get(exception).getCategories();
RegressionDataSet rds = new RegressionDataSet(numer, categories);
//The list of data sets
for (int i = 0; i < list.size(); i++)
if (i == exception)
continue;
else
for(int j = 0; j < list.get(i).size(); j++)
rds.addDataPoint(list.get(i).getDataPoint(j), list.get(i).getTargetValue(j));
return rds;
}
private static final int[] emptyInt = new int[0];
/**
* Creates a new data point with no categorical variables to be added to the
* data set. The arguments will be used directly, modifying them after will
* affect the data set.
*
* @param numerical the numerical values for the data point
* @param val the target value
* @throws IllegalArgumentException if the given values are inconsistent with the data this class stores.
*/
public void addDataPoint(Vec numerical, double val)
{
addDataPoint(numerical, emptyInt, val);
}
/**
* Creates a new data point to be added to the data set. The arguments will
* be used directly, modifying them after will affect the data set.
*
* @param numerical the numerical values for the data point
* @param categories the categorical values for the data point
* @param val the target value to predict
* @throws IllegalArgumentException if the given values are inconsistent with the data this class stores.
*/
public void addDataPoint(Vec numerical, int[] categories, double val)
{
if(numerical.length() != numNumerVals)
throw new RuntimeException("Data point does not contain enough numerical data points");
if(this.categories.length != categories.length)
throw new RuntimeException("Data point does not contain enough categorical data points");
for(int i = 0; i < categories.length; i++)
if(!this.categories[i].isValidCategory(categories[i]) && categories[i] >= 0) // >= so that missing values (negative) are allowed
throw new RuntimeException("Categoriy value given is invalid");
DataPoint dp = new DataPoint(numerical, categories, this.categories);
addDataPoint(dp, val);
}
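/*
 * A small construction sketch added for illustration; it is not part of the
 * original source. It assumes a purely numeric problem with two features and no
 * categorical attributes, and the feature/target values below are made up.
 *
 * RegressionDataSet data = new RegressionDataSet(2, new CategoricalData[0]);
 * data.addDataPoint(new DenseVector(new double[]{1.0, 2.0}), 3.0);
 * data.addDataPoint(new DenseVector(new double[]{2.0, 0.5}), 2.5);
 * //a point can also be added with an explicit categorical array and a weight of 2.0
 * data.addDataPoint(new DataPoint(new DenseVector(new double[]{0.0, 1.0}), new int[0], new CategoricalData[0]), 1.0, 2.0);
 */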
/**
*
* @param dp the data to add
* @param val the target value for this data point
*/
public void addDataPoint(DataPoint dp, double val)
{
addDataPoint(dp, val, 1.0);
}
/**
*
* @param dp the data to add
* @param val the target value for this data point
* @param weight the weight for this data point
*/
public void addDataPoint(DataPoint dp, double val, double weight)
{
if(dp.numNumericalValues() != getNumNumericalVars() || dp.numCategoricalValues() != getNumCategoricalVars())
throw new RuntimeException("The added data point does not match the number of values and categories for the data set");
else if(Double.isInfinite(val) || Double.isNaN(val))
throw new ArithmeticException("Unregressiable value " + val + " given for regression");
datapoints.addDataPoint(dp);
targets.add(val);
setWeight(size()-1, weight);
}
public void addDataPointPair(DataPointPair<Double> pair)
{
addDataPoint(pair.getDataPoint(), pair.getPair());
}
/**
* Returns the i'th data point in the data set paired with its target regressor value.
* Modifying the DataPointPair will affect the data set.
*
* @param i the index of the data point to obtain
* @return the i'th DataPointPair
*/
public DataPointPair<Double> getDataPointPair(int i)
{
return new DataPointPair<>(getDataPoint(i), targets.get(i));
}
/**
* Returns a new list containing copies of the data points in this data set,
* paired with their regression target values. Modifications to the list
* or data points will not affect this data set.
*
* @return a list of copies of the data points in this set
*/
public List<DataPointPair<Double>> getAsDPPList()
{
ArrayList<DataPointPair<Double>> list = new ArrayList<>(size());
for(int i = 0; i < size(); i++)
list.add(new DataPointPair<>(getDataPoint(i).clone(), targets.get(i)));
return list;
}
/**
* Returns a new list containing the data points in this data set, paired with
* their regression target values. Modifications to the list will not affect
* the data set, but modifying the points will. For a copy of the points, use
* the {@link #getAsDPPList() } method.
*
* @return a list of the data points in this set
*/
public List<DataPointPair<Double>> getDPPList()
{
ArrayList<DataPointPair<Double>> list = new ArrayList<>(size());
for(int i = 0; i < size(); i++)
list.add(getDataPointPair(i));
return list;
}
/**
* Sets the target regression value associated with a given data point
* @param i the index in the data set
* @param val the new target value
* @throws ArithmeticException if <tt>val</tt> is infinite or NaN
*/
public void setTargetValue(int i, double val)
{
if(Double.isInfinite(val) || Double.isNaN(val))
throw new ArithmeticException("Can not predict a " + val + " value");
targets.set(i, val);
}
@Override
protected RegressionDataSet getSubset(List<Integer> indicies)
{
if (this.datapoints.rowMajor())
{
RegressionDataSet newData = new RegressionDataSet(numNumerVals, categories);
for (int i : indicies)
newData.addDataPoint(getDataPoint(i), getTargetValue(i));
return newData;
}
else //copy columns at a time to make it faster
{
int new_n = indicies.size();
//with potentially sparse, oddly spaced inputs, build a map of the original indices to keep so membership checks are fast while iterating
Map<Integer, Integer> old_indx_to_new = new HashMap<>(indicies.size());
for(int new_i = 0; new_i < indicies.size(); new_i++)
old_indx_to_new.put(indicies.get(new_i), new_i);
DataStore new_ds = this.datapoints.emptyClone();
Iterator<DataPoint> data_iter = this.datapoints.getRowIter();
DoubleList new_targets = new DoubleList();
int orig_pos = 0;
while(data_iter.hasNext())
{
DataPoint dp = data_iter.next();
if(old_indx_to_new.containsKey(orig_pos))
{
DataPoint new_dp = new DataPoint(dp.getNumericalValues().clone(),Arrays.copyOf( dp.getCategoricalValues(), this.getNumCategoricalVars()), categories);
new_ds.addDataPoint(new_dp);
new_targets.add(this.getTargetValue(orig_pos));
}
orig_pos++;
}
new_ds.finishAdding();
return new RegressionDataSet(new_ds, new_targets);
}
}
/**
* Returns a vector containing the target regression values for each
* data point. The vector is a copy, and modifications to it will not
* affect the data set.
*
* @return a vector containing the target values for each data point
*/
public Vec getTargetValues()
{
DenseVector vals = new DenseVector(size());
for(int i = 0; i < size(); i++)
vals.set(i, targets.getD(i));
return vals;
}
/**
* Returns the target regression value for the <tt>i</tt>'th data point in the data set.
*
* @param i the data point to get the regression value of
* @return the target regression value
*/
public double getTargetValue(int i)
{
return targets.getD(i);
}
/**
* Creates a new data set from the given list of data point pairs. The data
* points themselves are not copied, so changes made to a data point will be
* reflected in this data set as well.
*
* @param list the list of data point pairs to build the new data set from
* @return a new data set
*/
public static RegressionDataSet usingDPPList(List<DataPointPair<Double>> list)
{
return new RegressionDataSet(list);
}
@Override
public RegressionDataSet shallowClone()
{
RegressionDataSet clone = new RegressionDataSet(numNumerVals, categories);
for(int i = 0; i < size(); i++)
clone.addDataPointPair(getDataPointPair(i));
return clone;
}
@Override
public RegressionDataSet emptyClone()
{
return new RegressionDataSet(numNumerVals, categories);
}
@Override
public RegressionDataSet getTwiceShallowClone()
{
return (RegressionDataSet) super.getTwiceShallowClone(); //To change body of generated methods, choose Tools | Templates.
}
}
| 13,538 | 35.591892 | 157 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/RegressionModelEvaluation.java | package jsat.regression;
import static java.lang.Math.*;
import java.util.*;
import java.util.Map.Entry;
import jsat.classifiers.*;
import jsat.datatransform.DataTransformProcess;
import jsat.exceptions.UntrainedModelException;
import jsat.math.OnLineStatistics;
import jsat.regression.evaluation.RegressionScore;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* Provides a mechanism to quickly evaluate a regression model on a data set.
* This can be done by cross validation or with a separate testing set.
*
* @author Edward Raff
*/
public class RegressionModelEvaluation
{
private Regressor regressor;
private RegressionDataSet dataSet;
/**
* Whether or not the evaluation will be performed using multiple threads
*/
private boolean parallel;
private OnLineStatistics sqrdErrorStats;
private long totalTrainingTime = 0, totalClassificationTime = 0;
private DataTransformProcess dtp;
private Map<RegressionScore, OnLineStatistics> scoreMap;
private boolean keepModels = false;
/**
* This holds models for each index that will be kept. If using a test set,
* only index 0 is used.
*/
private Regressor[] keptModels;
/**
* This holds models for each fold index that will be used for warm starts.
* If using a test set, only index 0 is used.
*/
private Regressor[] warmModels;
/**
* Creates a new RegressionModelEvaluation that can perform parallel training.
* @param regressor the regressor model to evaluate
* @param dataSet the data set to train or perform cross validation from
* @param parallel {@code true} if the training should be done using
* multiple-cores, {@code false} for single threaded.
*/
public RegressionModelEvaluation(Regressor regressor, RegressionDataSet dataSet, boolean parallel)
{
this.regressor = regressor;
this.dataSet = dataSet;
this.parallel = parallel;
this.dtp =new DataTransformProcess();
scoreMap = new LinkedHashMap<>();
}
/**
* Creates a new RegressionModelEvaluation that will perform serial training
* @param regressor the regressor model to evaluate
* @param dataSet the data set to train or perform cross validation from
*/
public RegressionModelEvaluation(Regressor regressor, RegressionDataSet dataSet)
{
this(regressor, dataSet, false);
}
/**
* Set this to {@code true} in order to keep the trained models after
* evaluation. They can then be retrieved used the {@link #getKeptModels() }
* methods. The default value is {@code false}.
*
* @param keepModels {@code true} to keep the trained models after
* evaluation, {@code false} to discard them.
*/
public void setKeepModels(boolean keepModels)
{
this.keepModels = keepModels;
}
/**
* This will keep the models trained when evaluating the model. The models
* can be obtained after an evaluation from {@link #getKeptModels() }.
*
* @return {@code true} if trained models will be kept after evaluation.
*/
public boolean isKeepModels()
{
return keepModels;
}
/**
* Returns the models that were kept after the last evaluation. {@code null}
* will be returned instead if {@link #isKeepModels() } returns
* {@code false}, which is the default.
*
* @return the models that were kept after the last evaluation. Or
* {@code null} if models are not being kept.
*/
public Regressor[] getKeptModels()
{
return keptModels;
}
/**
* Sets the models that will be used for warm starting training. If using
* cross-validation, the number of models given should match the number of
* folds. If using a test set, only one model should be given.
*
* @param warmModels the models to use for warm start training
*/
public void setWarmModels(Regressor... warmModels)
{
this.warmModels = warmModels;
}
/**
* Sets the data transform process to use when performing cross validation.
* By default, no transforms are applied
* @param dtp the transformation process to clone for use during evaluation
*/
public void setDataTransformProcess(DataTransformProcess dtp)
{
this.dtp = dtp.clone();
}
/**
* Performs an evaluation of the regressor using the training data set.
* The evaluation is done by performing cross validation.
* @param folds the number of folds for cross validation
* @throws UntrainedModelException if the number of folds given is less than 2
*/
public void evaluateCrossValidation(int folds)
{
evaluateCrossValidation(folds, RandomUtil.getRandom());
}
/**
* Performs an evaluation of the regressor using the training data set.
* The evaluation is done by performing cross validation.
* @param folds the number of folds for cross validation
* @param rand the source of randomness for generating the cross validation sets
* @throws UntrainedModelException if the number of folds given is less than 2
*/
public void evaluateCrossValidation(int folds, Random rand)
{
if(folds < 2)
throw new UntrainedModelException("Model could not be evaluated because " + folds + " is < 2, and not valid for cross validation");
List<RegressionDataSet> lcds = dataSet.cvSet(folds, rand);
evaluateCrossValidation(lcds);
}
/**
* Performs an evaluation of the regressor using the training data set,
* where the folds of the training data set are provided by the user. The
* folds do not need to be the same sizes, though it is assumed that they
* are all approximately the same size. It is the caller's responsibility to
* ensure that the folds are only from the original training data set. <br>
* <br>
* This method exists so that the user can provide very specific folds if
* they so desire. This can be useful when there is known bias in the data
* set, such as when caused by duplicate data point values. The caller can
* then manually make sure duplicate values all occur in the same fold to
* avoid over-estimating the accuracy of the model.
*
* @param lcds the training data set already split into folds
*/
public void evaluateCrossValidation(List<RegressionDataSet> lcds)
{
List<RegressionDataSet> trainCombinations = new ArrayList<RegressionDataSet>(lcds.size());
for (int i = 0; i < lcds.size(); i++)
trainCombinations.add(RegressionDataSet.comineAllBut(lcds, i));
evaluateCrossValidation(lcds, trainCombinations);
}
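/*
 * A hedged sketch (added for illustration, not part of the original source) of
 * providing custom folds from the caller's side. "eval" and "data" are assumed
 * to be an existing RegressionModelEvaluation and RegressionDataSet; the
 * alternating split below is only a stand-in for grouping related or duplicate
 * points into the same fold, as described above.
 *
 * List<RegressionDataSet> folds = new ArrayList<>();
 * folds.add(data.emptyClone());
 * folds.add(data.emptyClone());
 * for (int i = 0; i < data.size(); i++)
 *     folds.get(i % 2).addDataPoint(data.getDataPoint(i), data.getTargetValue(i));
 * eval.evaluateCrossValidation(folds);
 */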
/**
* Note: Most people should never need to call this method. Make sure you
* understand what you are doing before you do.<br>
* <br>
* Performs an evaluation of the regressor using the training data set,
* where the folds of the training data set, and their combinations, are
* provided by the user. The folds do not need to be the same sizes, though
* it is assumed that they are all approximately the same size - and the
* training combination corresponding to each index will be the sum of the
* folds in the other indices. It is the caller's responsibility to ensure
* that the folds are only from the original training data set. <br>
* <br>
* This method exists so that the user can provide very specific folds if
* they so desire, and when the same folds will be used multiple times.
* Doing so allows the algorithms called to take advantage of any potential
* caching of results based on the data set and avoid all possible excessive
* memory movement. (For example, {@link jsat.DataSet#getNumericColumns() } may
* get re-used and benefit from its caching)<br>
* The same behavior of this method can be obtained by calling {@link #evaluateCrossValidation(java.util.List)
* }.
*
* @param lcds training data set already split into folds
* @param trainCombinations each index contains the training data sans the
* data stored in the fold associated with that index
*/
public void evaluateCrossValidation(List<RegressionDataSet> lcds, List<RegressionDataSet> trainCombinations)
{
sqrdErrorStats = new OnLineStatistics();
totalTrainingTime = totalClassificationTime = 0;
for(int i = 0; i < lcds.size(); i++)
{
RegressionDataSet trainSet = trainCombinations.get(i);
RegressionDataSet testSet = lcds.get(i);
evaluationWork(trainSet, testSet, i);
}
}
/**
* Performs an evaluation of the regressor using the initial data set to
* train, and testing on the given data set.
* @param testSet the data set to perform testing on
*/
public void evaluateTestSet(RegressionDataSet testSet)
{
sqrdErrorStats = new OnLineStatistics();
totalTrainingTime = totalClassificationTime = 0;
evaluationWork(dataSet, testSet, 0);
}
private void evaluationWork(RegressionDataSet trainSet, RegressionDataSet testSet, int index)
{
trainSet = trainSet.shallowClone();
DataTransformProcess curProccess = dtp.clone();
curProccess.learnApplyTransforms(trainSet);
long startTrain = System.currentTimeMillis();
final Regressor regressorTouse = regressor.clone();
if(warmModels != null && regressorTouse instanceof WarmRegressor)//train from the warm model
{
WarmRegressor wr = (WarmRegressor) regressorTouse;
wr.train(trainSet, warmModels[index], parallel);
}
else//do the normal thing
{
regressorTouse.train(trainSet, parallel);
}
totalTrainingTime += (System.currentTimeMillis() - startTrain);
if(keptModels != null)
keptModels[index] = regressorTouse;
//place to store the scores that may get updated by several threads
final Map<RegressionScore, RegressionScore> scoresToUpdate = new HashMap<>();
for(Entry<RegressionScore, OnLineStatistics> entry : scoreMap.entrySet())
{
RegressionScore score = entry.getKey().clone();
score.prepare();
scoresToUpdate.put(score, score);
}
// CountDownLatch latch;
// if(testSet.getSampleSize() < SystemInfo.LogicalCores || !parallel)
// {
// latch = new CountDownLatch(1);
// new Evaluator(testSet, curProccess, 0, testSet.getSampleSize(), scoresToUpdate, regressorTouse, latch).run();
// }
// else//go parallel!
// {
// latch = new CountDownLatch(SystemInfo.LogicalCores);
// final int blockSize = testSet.getSampleSize()/SystemInfo.LogicalCores;
// int extra = testSet.getSampleSize()%SystemInfo.LogicalCores;
//
// int start = 0;
// while(start < testSet.getSampleSize())
// {
// int end = start+blockSize;
// if(extra-- > 0)
// end++;
// parallel.submit(new Evaluator(testSet, curProccess, start, end, scoresToUpdate, regressorTouse, latch));
// start = end;
// }
// }
ParallelUtils.run(parallel, testSet.size(), (start, end)->
{
//create a local set of scores to update
long localPredictionTime = 0;
OnLineStatistics localSqrdErrors = new OnLineStatistics();
Set<RegressionScore> localScores = new HashSet<>();
for (Entry<RegressionScore, RegressionScore> entry : scoresToUpdate.entrySet())
localScores.add(entry.getKey().clone());
for (int i = start; i < end; i++)
{
DataPoint di = testSet.getDataPoint(i);
double trueVal = testSet.getTargetValue(i);
DataPoint tranDP = curProccess.transform(di);
long startTime = System.currentTimeMillis();
double predVal = regressorTouse.regress(tranDP);
localPredictionTime += (System.currentTimeMillis() - startTime);
double sqrdError = pow(trueVal - predVal, 2);
for (RegressionScore score : localScores)
score.addResult(predVal, trueVal, testSet.getWeight(i));
localSqrdErrors.add(sqrdError, testSet.getWeight(i));
}
synchronized (sqrdErrorStats)
{
sqrdErrorStats.add(localSqrdErrors);
totalClassificationTime += localPredictionTime;
for (RegressionScore score : localScores)
scoresToUpdate.get(score).addResults(score);
}
});
//accumulate score info
for (Entry<RegressionScore, OnLineStatistics> entry : scoreMap.entrySet())
{
RegressionScore score = entry.getKey().clone();
score.prepare();
score.addResults(scoresToUpdate.get(score));
entry.getValue().add(score.getScore());
}
}
/**
* Adds a new score object that will be used as part of the evaluation when
* calling {@link #evaluateCrossValidation(int, java.util.Random) } or
* {@link #evaluateTestSet(jsat.regression.RegressionDataSet) }. The
* statistics for the given score are reset on every call, and the mean /
* standard deviation comes from multiple folds in cross validation. <br>
* <br>
* The score statistics can be obtained from
* {@link #getScoreStats(jsat.regression.evaluation.RegressionScore) }
* after one of the evaluation methods have been called.
*
* @param scorer the score method to keep track of.
*/
public void addScorer(RegressionScore scorer)
{
scoreMap.put(scorer, new OnLineStatistics());
}
/**
* Gets the statistics associated with the given score. If the score is not
* currently in the model evaluation {@code null} will be returned. The
* object passed in does not need to be the exact same object passed to
* {@link #addScorer(jsat.regression.evaluation.RegressionScore) },
* it only needs to be equal to the object.
*
* @param score the score type to get the result statistics
* @return the result statistics for the given score, or {@code null} if the
* score is not in the evaluation set
*/
public OnLineStatistics getScoreStats(RegressionScore score)
{
return scoreMap.get(score);
}
/**
* Prints out the regression score information in a convenient format. If no
* additional scores were added via the
* {@link #addScorer(RegressionScore) }
* method, nothing will be printed.
*/
public void prettyPrintRegressionScores()
{
int nameLength = 10;
for(Entry<RegressionScore, OnLineStatistics> entry : scoreMap.entrySet())
nameLength = Math.max(nameLength, entry.getKey().getName().length()+2);
final String pfx = "%-" + nameLength;//prefix
for(Entry<RegressionScore, OnLineStatistics> entry : scoreMap.entrySet())
System.out.printf(pfx+"s %-5f (%-5f)\n", entry.getKey().getName(), entry.getValue().getMean(), entry.getValue().getStandardDeviation());
}
/**
* Returns the minimum squared error from all runs.
* @return the minimum observed squared error
*/
public double getMinError()
{
return sqrdErrorStats.getMin();
}
/**
* Returns the maximum squared error observed from all runs.
* @return the maximum observed squared error
*/
public double getMaxError()
{
return sqrdErrorStats.getMax();
}
/**
* Returns the mean squared error from all runs.
* @return the overall mean squared error
*/
public double getMeanError()
{
return sqrdErrorStats.getMean();
}
/**
* Returns the standard deviation of the error from all runs
* @return the overall standard deviation of the errors
*/
public double getErrorStndDev()
{
return sqrdErrorStats.getStandardDeviation();
}
/**
* Returns the total number of milliseconds spent training the regressor.
* @return the total number of milliseconds spent training the regressor.
*/
public long getTotalTrainingTime()
{
return totalTrainingTime;
}
/**
* Returns the total number of milliseconds spent performing regression on the testing set.
* @return the total number of milliseconds spent performing regression on the testing set.
*/
public long getTotalClassificationTime()
{
return totalClassificationTime;
}
/**
* Returns the regressor that was to be evaluated
* @return the regressor originally given
*/
public Regressor getRegressor()
{
return regressor;
}
}
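/*
 * A minimal end-to-end usage sketch added for illustration; it is not part of
 * the original source. It assumes a RegressionDataSet named "data", and uses
 * RidgeRegression and the MeanSquaredError score only as examples of a Regressor
 * and a RegressionScore; 10 folds is an arbitrary choice.
 *
 * RegressionModelEvaluation eval = new RegressionModelEvaluation(new RidgeRegression(), data);
 * eval.addScorer(new MeanSquaredError());
 * eval.evaluateCrossValidation(10);
 * eval.prettyPrintRegressionScores();
 * double meanSqrdError = eval.getMeanError();
 */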
| 17,279 | 37.485523 | 148 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/Regressor.java |
package jsat.regression;
import java.io.Serializable;
import java.util.concurrent.ExecutorService;
import jsat.classifiers.DataPoint;
/**
*
* @author Edward Raff
*/
public interface Regressor extends Cloneable, Serializable
{
public double regress(DataPoint data);
public void train(RegressionDataSet dataSet, boolean parallel);
default public void train(RegressionDataSet dataSet)
{
train(dataSet, false);
}
public boolean supportsWeightedData();
public Regressor clone();
}
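/*
 * A toy implementation sketch added for illustration; it is not part of the
 * original source. It shows the contract of this interface: train() learns the
 * mean target value and regress() always predicts it. Data point weights and
 * error handling are ignored for brevity.
 *
 * class MeanRegressor implements Regressor
 * {
 *     private double mean;
 *
 *     @Override
 *     public void train(RegressionDataSet dataSet, boolean parallel)
 *     {
 *         double sum = 0;
 *         for (int i = 0; i < dataSet.size(); i++)
 *             sum += dataSet.getTargetValue(i);
 *         mean = sum / dataSet.size();
 *     }
 *
 *     @Override
 *     public double regress(DataPoint data)
 *     {
 *         return mean; //a constant prediction, regardless of the input
 *     }
 *
 *     @Override
 *     public boolean supportsWeightedData()
 *     {
 *         return false;
 *     }
 *
 *     @Override
 *     public Regressor clone()
 *     {
 *         MeanRegressor copy = new MeanRegressor();
 *         copy.mean = this.mean;
 *         return copy;
 *     }
 * }
 */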
| 538 | 18.962963 | 67 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/RidgeRegression.java | package jsat.regression;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.classifiers.DataPoint;
import jsat.linear.*;
import jsat.parameters.Parameter;
import jsat.parameters.Parameterized;
import jsat.utils.FakeExecutor;
import jsat.utils.concurrent.ParallelUtils;
/**
* An implementation of Ridge Regression that finds the exact solution. Ridge
* Regression is equivalent to {@link MultipleLinearRegression} with an added
* L<sub>2</sub> penalty for the weight vector. <br><br>
* Two different methods of finding the solution can be used. This algorithm
* should be used only for small dimension problems with a reasonable number of
* example points.<br>
* For large dimension sparse problems, or dense problems with many data points
* (or both), use the {@link StochasticRidgeRegression}. For small data sets
* that pose non-linear problems, you can also use {@link KernelRidgeRegression}
*
* @author Edward Raff
*/
public class RidgeRegression implements Regressor, Parameterized
{
private static final long serialVersionUID = -4605757038780391895L;
private double lambda;
private Vec w;
private double bias;
private SolverMode mode;
/**
* Sets which solver to use
*/
public enum SolverMode
{
/**
* Solves by {@link CholeskyDecomposition}
*/
EXACT_CHOLESKY,
/**
* Solves by {@link SingularValueDecomposition}
*/
EXACT_SVD,
}
public RidgeRegression()
{
this(1e-2);
}
public RidgeRegression(double regularization)
{
this(regularization, SolverMode.EXACT_CHOLESKY);
}
public RidgeRegression(double regularization, SolverMode mode)
{
setLambda(regularization);
setSolverMode(mode);
}
/**
* Sets the regularization parameter used.
* @param lambda the positive regularization constant in (0, Inf)
*/
public void setLambda(double lambda)
{
if(Double.isNaN(lambda) || Double.isInfinite(lambda) || lambda <= 0)
throw new IllegalArgumentException("lambda must be a positive constant, not " + lambda);
this.lambda = lambda;
}
/**
* Returns the regularization constant in use
* @return the regularization constant in use
*/
public double getLambda()
{
return lambda;
}
/**
* Sets which solver is to be used
* @param mode the solver mode to use
*/
public void setSolverMode(SolverMode mode)
{
this.mode = mode;
}
/**
* Returns the solver in use
* @return the solver to use
*/
public SolverMode getSolverMode()
{
return mode;
}
@Override
public double regress(DataPoint data)
{
Vec x = data.getNumericalValues();
return w.dot(x)+bias;
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
final int dim = dataSet.getNumNumericalVars()+1;
DenseMatrix X = new DenseMatrix(dataSet.size(), dim);
for(int i = 0; i < dataSet.size(); i++)
{
Vec from = dataSet.getDataPoint(i).getNumericalValues();
X.set(i, 0, 1.0);
for(int j = 0; j < from.length(); j++)
X.set(i, j+1, from.get(j));
}
final Vec Y = dataSet.getTargetValues();
final boolean serial = !parallel;
if(mode == SolverMode.EXACT_SVD)
{
SingularValueDecomposition svd = new SingularValueDecomposition(X);
double[] ridgeD;
ridgeD = Arrays.copyOf(svd.getSingularValues(), dim);
for(int i = 0; i < ridgeD.length; i++)
ridgeD[i] = 1 / (Math.pow(ridgeD[i], 2)+lambda);
Matrix U = svd.getU();
Matrix V = svd.getV();
// w = V (D^2 + lambda I)^(-1) D U^T y
Matrix.diagMult(V, DenseVector.toDenseVec(ridgeD));
Matrix.diagMult(V, DenseVector.toDenseVec(svd.getSingularValues()));
w = V.multiply(U.transpose()).multiply(Y);
}
else//cholesky
{
Matrix H = serial ? X.transposeMultiply(X) : X.transposeMultiply(X, ParallelUtils.CACHED_THREAD_POOL);
//H + I * reg equiv to H.mutableAdd(Matrix.eye(H.rows()).multiply(regularization));
for(int i = 0; i < H.rows(); i++)
H.increment(i, i, lambda);
CholeskyDecomposition cd = serial ? new CholeskyDecomposition(H) : new CholeskyDecomposition(H, ParallelUtils.CACHED_THREAD_POOL);
w = cd.solve(Matrix.eye(H.rows())).multiply(X.transpose()).multiply(Y);
}
//reformat w and separate out the bias term
bias = w.get(0);
Vec newW = new DenseVector(w.length()-1);
for(int i = 0; i < newW.length(); i++)
newW.set(i, w.get(i+1));
w = newW;
}
@Override
public boolean supportsWeightedData()
{
return false;
}
@Override
public RidgeRegression clone()
{
RidgeRegression clone = new RidgeRegression(lambda);
if(this.w != null)
clone.w = this.w.clone();
clone.bias = this.bias;
return clone;
}
}
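/*
 * A brief usage sketch added for illustration; it is not part of the original
 * source. The regularization value 0.1 and the solver choice are arbitrary, and
 * "data" is assumed to be an existing RegressionDataSet with a modest number of
 * dense features.
 *
 * RidgeRegression ridge = new RidgeRegression(0.1, RidgeRegression.SolverMode.EXACT_SVD);
 * ridge.train(data);
 * double prediction = ridge.regress(data.getDataPoint(0));
 */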
| 5,336 | 28.486188 | 142 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/StochasticGradientBoosting.java |
package jsat.regression;
import java.util.*;
import jsat.classifiers.DataPoint;
import jsat.classifiers.DataPointPair;
import jsat.exceptions.UntrainedModelException;
import jsat.math.Function1D;
import jsat.math.rootfinding.RootFinder;
import jsat.math.rootfinding.Zeroin;
import jsat.parameters.Parameterized;
import jsat.utils.DoubleList;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
import jsat.utils.random.RandomUtil;
/**
* An implementation of Stochastic Gradient Boosting (SGB) for the Squared Error
* loss. SGB is also known as Gradient Boosting Machine. There is a specialized
* version of SGB known as TreeBoost, that is not implemented by this method.
* SGB is a boosting method derived for regression. It uses many weak learners
* by attempting to estimate the residual error of all previous learners. It can
* also use an initial strong learner and use the weak learners to refine the
* initial estimate.
*
* <br><br>
* See papers:<br>
* Friedman, J. H. (2002).
* <a href="http://onlinelibrary.wiley.com/doi/10.1002/cbdv.200490137/abstract">
* Stochastic gradient boosting</a>. Computational Statistics & Data Analysis,
* 38(4), 367–378.
* <br><br>
* Mohan, A., Chen, Z., & Weinberger, K. (2011).
* <a href="http://www1.cse.wustl.edu/~kilian/papers/mohan11a.pdf">Web-search
* ranking with initialized gradient boosted regression trees</a>.
* Journal of Machine Learning Research, 14,
*
*
*
* @author Edward Raff
*/
public class StochasticGradientBoosting implements Regressor, Parameterized
{
private static final long serialVersionUID = -2855154397476855293L;
/**
* The default value for the
* {@link #setTrainingProportion(double) training proportion} is
* {@value #DEFAULT_TRAINING_PROPORTION}.
*/
public static final double DEFAULT_TRAINING_PROPORTION = 0.5;
/**
* The default value for the {@link #setLearningRate(double) } is
* {@value #DEFAULT_LEARNING_RATE}
*/
public static final double DEFAULT_LEARNING_RATE = 0.1;
/**
* The proportion of the data set to be used for each iteration of training.
* The points that make up the iteration are a random sampling without
* replacement.
*/
private double trainingProportion;
private Regressor weakLearner;
private Regressor strongLearner;
/**
* The ordered list of weak learners
*/
private List<Regressor> F;
/**
* The list of learner coefficients for each weak learner.
*/
private List<Double> coef;
private double learningRate;
private int maxIterations;
/**
* Creates a new initialized SGB learner.
*
* @param strongLearner the powerful learner to refine with weak learners
* @param weakLearner the weak learner to fit to the residuals in each iteration
* @param maxIterations the maximum number of algorithm iterations to perform
* @param learningRate the multiplier to apply to the weak learners
* @param trainingPortion the proportion of the data set to use for each iteration of learning
*/
public StochasticGradientBoosting(Regressor strongLearner, Regressor weakLearner, int maxIterations, double learningRate, double trainingPortion)
{
this.trainingProportion = trainingPortion;
this.strongLearner = strongLearner;
this.weakLearner = weakLearner;
this.learningRate = learningRate;
this.maxIterations = maxIterations;
}
/**
* Creates a new SGB learner that is initialized using the weak learner.
*
* @param weakLearner the weak learner to fit to the residuals in each iteration
* @param maxIterations the maximum number of algorithm iterations to perform
* @param learningRate the multiplier to apply to the weak learners
* @param trainingPortion the proportion of the data set to use for each iteration of learning
*/
public StochasticGradientBoosting(Regressor weakLearner, int maxIterations, double learningRate, double trainingPortion)
{
this(null, weakLearner, maxIterations, learningRate, trainingPortion);
}
/**
* Creates a new SGB learner that is initialized using the weak learner.
*
* @param weakLearner the weak learner to fit to the residuals in each iteration
* @param maxIterations the maximum number of algorithm iterations to perform
* @param learningRate the multiplier to apply to the weak learners
*/
public StochasticGradientBoosting(Regressor weakLearner, int maxIterations, double learningRate)
{
this(weakLearner, maxIterations, learningRate, DEFAULT_TRAINING_PROPORTION);
}
/**
* Creates a new SGB learner that is initialized using the weak learner.
*
* @param weakLearner the weak learner to fit to the residuals in each iteration
* @param maxIterations the maximum number of algorithm iterations to perform
*/
public StochasticGradientBoosting(Regressor weakLearner, int maxIterations)
{
this(weakLearner, maxIterations, DEFAULT_LEARNING_RATE);
}
/**
* Sets the maximum number of iterations used in SGB.
*
* @param maxIterations the maximum number of algorithm iterations to perform
*/
public void setMaxIterations(int maxIterations)
{
this.maxIterations = maxIterations;
}
/**
* Returns the maximum number of iterations used in SGB
* @return the maximum number of algorithm iterations to perform
*/
public int getMaxIterations()
{
return maxIterations;
}
/**
* Sets the learning rate of the algorithm. The GB version uses a learning
* rate of 1. SGB uses a learning rate in (0,1) to avoid overfitting. The
* learning rate is multiplied by the output of each weak learner to reduce
* its contribution.
*
* @param learningRate the multiplier to apply to the weak learners
* @throws ArithmeticException if the learning rate is not in the range (0, 1]
*/
public void setLearningRate(double learningRate)
{
//+- Inf case captured in >1 <= 0 case
if(learningRate > 1 || learningRate <= 0 || Double.isNaN(learningRate))
throw new ArithmeticException("Invalid learning rate");
this.learningRate = learningRate;
}
/**
* Returns the learning rate of the algorithm used to control overfitting.
* @return the learning rate multiplier applied to the weak learner outputs
*/
public double getLearningRate()
{
return learningRate;
}
/**
* The GB version uses the whole data set at each iteration. SGB can use a
* fraction of the data set at each iteration in order to reduce overfitting
* and add randomness.
*
* @param trainingProportion the fraction of the training data set to use
* for each iteration of SGB
* @throws ArithmeticException if the trainingPortion is not a valid
* fraction in (0, 1]
*/
public void setTrainingProportion(double trainingProportion)
{
//+- Inf case captured in >1 <= 0 case
if(trainingProportion > 1 || trainingProportion <= 0 || Double.isNaN(trainingProportion))
throw new ArithmeticException("Training Proportion is invalid");
this.trainingProportion = trainingProportion;
}
/**
* Returns the fraction of the data points used during each iteration of the
* training algorithm.
*
* @return the fraction of the training data set to use for each
* iteration of SGB
*/
public double getTrainingProportion()
{
return trainingProportion;
}
@Override
public double regress(DataPoint data)
{
if(F == null || F.isEmpty())
throw new UntrainedModelException();
double result = 0;
for(int i =0; i < F.size(); i++)
result += F.get(i).regress(data)*coef.get(i);
return result;
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
//use a shallow clone of the data set so we can safely alter its target values
final RegressionDataSet resids = dataSet.shallowClone();
F = new ArrayList<>(maxIterations);
coef = new DoubleList(maxIterations);
//Add the first learner. Either an instance of the weak learner, or a strong initial estimate
Regressor lastF = strongLearner == null ? weakLearner.clone() : strongLearner.clone();
lastF.train(dataSet, parallel);
F.add(lastF);
coef.add(learningRate*getMinimizingErrorConst(dataSet, lastF));
/**
* Instead of recomputing previous weak learner's output, keep track of
* the current total sum to know the current prediction value
*/
final double[] currPredictions = new double[dataSet.size()];
final int randSampleSize = (int) Math.round(resids.size()*trainingProportion);
final Random rand = RandomUtil.getRandom();
IntList randOrder = IntList.range(resids.size());
for(int iter = 0; iter < maxIterations; iter++)
{
final double lastCoef = coef.get(iter);
lastF = F.get(iter);
//Compute the new residuals
for(int j = 0; j < resids.size(); j++)
{
//Update the current total prediction values while we do this
double lastFPred = lastF.regress(resids.getDataPoint(j));
currPredictions[j] += lastCoef*lastFPred;
//The next set of residuals could be computed from the previous,
//but it's more stable to just take the total residuals from the
//source each time
resids.setTargetValue(j, (dataSet.getTargetValue(j)-currPredictions[j]));
}
//Take a random sample
Collections.shuffle(randOrder, rand);
RegressionDataSet subSet = resids.emptyClone();
for(int i : randOrder.subList(0, randSampleSize))
subSet.addDataPoint(resids.getDataPoint(i), resids.getTargetValue(i), resids.getWeight(i));
final Regressor h = weakLearner.clone();
h.train(subSet, parallel);
double y = getMinimizingErrorConst( resids, h);
F.add(h);
coef.add(learningRate*y);
}
}
/**
* Finds the constant <tt>y</tt> such that the squared error of the
* Regressor <tt>h</tt> on the set of residuals <tt>backingResidsList</tt>
* is minimized.
* @param backingResidsList the DataPointPair list of residuals
* @param h the regressor that is having the error of its output minimized
* @return the constant <tt>y</tt> that minimizes the squared error of the regressor on the training set.
*/
private double getMinimizingErrorConst(final RegressionDataSet backingResidsList, final Regressor h)
{
//Find the coefficient that minimizes the residual error by finding the zero of its derivative (local minimum)
Function1D fhPrime = getDerivativeFunc(backingResidsList, h);
RootFinder rf = new Zeroin();
double y = rf.root(1e-4, 50, new double[]{-2.5, 2.5}, fhPrime);
return y;
}
/**
* Returns a function object that approximates the derivative of the squared
* error of the Regressor as a function of the constant factor multiplied on
* the Regressor's output.
*
* @param backingResidsList the DataPointPair list of residuals
* @param h the regressor that is having the error of its output minimized
* @return a Function object approximating the derivative of the squared error
*/
private Function1D getDerivativeFunc(final RegressionDataSet backingResidsList, final Regressor h)
{
final Function1D fhPrime = (double x) ->
{
double c1 = x;//c2=c1-eps
double eps = 1e-5;
double c1Pc2 = c1 * 2 - eps;//c1+c2 = c1+c1-eps
double result = 0;
/*
* Computing a value proportional to the derivative: f(x)-f(x-eps), which approximates eps * f'(x)
*
* hEst is the output of the new regressor, target is the true residual target value
*
* So we have several
* (hEst_i c1 - target)^2 - (hEst_i c2 -target)^2 //4 muls, 3 subs
* Where c2 = c1-eps
* Which simplifies to
* (c1 - c2) hEst ((c1 + c2) hEst - 2 target)
* =
* eps hEst (c1Pc2 hEst - 2 target)//3 muls, 1 sub, 1 shift (mul by 2)
*
* because eps is on the outside and independent of each
* individual summation, we can move it out and do the eps
* multiplication on the final result. Reducing us to
*
* 2 muls, 1 sub, 1 shift (mul by 2)
*
* per loop
*
* Which reduce computation, and allows us to get the result
* in one pass of the data
*/
for(int i = 0; i < backingResidsList.size(); i++)
{
double hEst = h.regress(backingResidsList.getDataPoint(i));
double target = backingResidsList.getTargetValue(i);
result += hEst * (c1Pc2 * hEst - 2 * target);
}
return result * eps;
};
return fhPrime;
}
@Override
public boolean supportsWeightedData()
{
if(strongLearner != null)
return strongLearner.supportsWeightedData() && weakLearner.supportsWeightedData();
return weakLearner.supportsWeightedData();
}
@Override
public StochasticGradientBoosting clone()
{
StochasticGradientBoosting clone = new StochasticGradientBoosting(weakLearner.clone(), maxIterations, learningRate, trainingProportion);
if(F != null)
{
clone.F = new ArrayList<>(F.size());
for(Regressor f : this.F)
clone.F.add(f.clone());
}
if(coef != null)
{
clone.coef = new DoubleList(this.coef);
}
if(strongLearner != null)
clone.strongLearner = this.strongLearner.clone();
return clone;
}
}
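/*
 * A minimal usage sketch added for illustration; it is not part of the original
 * source. It assumes a RegressionDataSet named "data" and uses a DecisionTree as
 * the weak learner, assuming it is available as a Regressor in this library; any
 * Regressor could be substituted. The iteration count, learning rate, and
 * sampling proportion are illustrative values only.
 *
 * Regressor weakLearner = new DecisionTree();
 * StochasticGradientBoosting sgb = new StochasticGradientBoosting(weakLearner, 100, 0.1, 0.5);
 * sgb.train(data);
 * double prediction = sgb.regress(data.getDataPoint(0));
 */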
| 14,755 | 36.168766 | 149 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/StochasticRidgeRegression.java |
package jsat.regression;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutorService;
import jsat.SingleWeightVectorModel;
import jsat.classifiers.DataPoint;
import jsat.linear.DenseVector;
import jsat.linear.IndexValue;
import jsat.linear.Vec;
import jsat.math.decayrates.DecayRate;
import jsat.math.decayrates.NoDecay;
import jsat.parameters.Parameter;
import jsat.parameters.Parameterized;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
/**
* A Stochastic implementation of Ridge Regression. Ridge
* Regression is equivalent to {@link MultipleLinearRegression} with an added
* L<sub>2</sub> penalty for the weight vector. <br><br>
* This algorithm works best for problems with a large number of data points or
* very high dimensional problems.
*
* @author Edward Raff
*/
public class StochasticRidgeRegression implements Regressor, Parameterized, SingleWeightVectorModel
{
private static final long serialVersionUID = -3462783438115627128L;
private double lambda;
private int epochs;
private int batchSize;
private double learningRate;
private DecayRate learningDecay;
private Vec w;
private double bias;
/**
* Creates a new stochastic Ridge Regression learner that does not use a
* decay rate
* @param lambda the regularization term
* @param epochs the number of training epochs to perform
* @param batchSize the batch size for updates
* @param learningRate the learning rate
*/
public StochasticRidgeRegression(double lambda, int epochs, int batchSize, double learningRate)
{
this(lambda, epochs, batchSize, learningRate, new NoDecay());
}
/**
* Creates a new stochastic Ridge Regression learner
* @param lambda the regularization term
* @param epochs the number of training epochs to perform
* @param batchSize the batch size for updates
* @param learningRate the learning rate
* @param learningDecay the learning rate decay
*/
public StochasticRidgeRegression(double lambda, int epochs, int batchSize, double learningRate, DecayRate learningDecay)
{
setLambda(lambda);
setEpochs(epochs);
setBatchSize(batchSize);
setLearningRate(learningRate);
setLearningDecay(learningDecay);
}
/**
* Sets the regularization parameter used.
* @param lambda the positive regularization constant in (0, Inf)
*/
public void setLambda(double lambda)
{
if(Double.isNaN(lambda) || Double.isInfinite(lambda) || lambda <= 0)
throw new IllegalArgumentException("lambda must be a positive constant, not " + lambda);
this.lambda = lambda;
}
/**
* Returns the regularization constant in use
* @return the regularization constant in use
*/
public double getLambda()
{
return lambda;
}
/**
* Sets the learning rate used, and should be in the range (0, 1).
*
* @param learningRate the learning rate to use
*/
public void setLearningRate(double learningRate)
{
this.learningRate = learningRate;
}
/**
* Returns the learning rate in use.
* @return the learning rate to use.
*/
public double getLearningRate()
{
return learningRate;
}
/**
* Sets the learning rate decay function to use. The decay is applied after
* each epoch through the data set. Using a decay rate can reduce the time
* to converge and improve the quality of the solution for difficult problems.
* @param learningDecay the decay function to apply to the learning rate
*/
public void setLearningDecay(DecayRate learningDecay)
{
this.learningDecay = learningDecay;
}
/**
* Returns the learning decay rate used
* @return the learning decay rate used
*/
public DecayRate getLearningDecay()
{
return learningDecay;
}
/**
* Sets the batch size to learn from. If larger than the training set, the
* problem will reduce to classic gradient descent.
*
* @param batchSize the number of training points to use in each batch update
*/
public void setBatchSize(int batchSize)
{
if(batchSize <= 0)
throw new IllegalArgumentException("Batch size must be a positive constant, not " + batchSize);
this.batchSize = batchSize;
}
/**
* Returns the batch size for updates
* @return the batch size for updates
*/
public int getBatchSize()
{
return batchSize;
}
/**
* Sets the number of iterations through the whole training set that will be
* performed.
* @param epochs the number of training iterations
*/
public void setEpochs(int epochs)
{
if(epochs <= 0)
throw new IllegalArgumentException("At least one epoch must be performed, can not use " + epochs);
this.epochs = epochs;
}
/**
* Returns the number of training iterations
* @return the number of training iterations
*/
public int getEpochs()
{
return epochs;
}
@Override
public Vec getRawWeight()
{
return w;
}
@Override
public double getBias()
{
return bias;
}
@Override
public Vec getRawWeight(int index)
{
if(index < 1)
return getRawWeight();
else
throw new IndexOutOfBoundsException("Model has only 1 weight vector");
}
@Override
public double getBias(int index)
{
if (index < 1)
return getBias();
else
throw new IndexOutOfBoundsException("Model has only 1 weight vector");
}
@Override
public int numWeightsVecs()
{
return 1;
}
@Override
public double regress(DataPoint data)
{
return regress(data.getNumericalValues());
}
private double regress(Vec data)
{
return w.dot(data) + bias;
}
@Override
public void train(RegressionDataSet dataSet, boolean parallel)
{
train(dataSet);
}
@Override
public void train(RegressionDataSet dataSet)
{
int batch = Math.min(batchSize, dataSet.size());
w = new DenseVector(dataSet.getNumNumericalVars());
IntList sample = new IntList(dataSet.size());
ListUtils.addRange(sample, 0, dataSet.size(), 1);
//Time and lastTime are used to lazily update the parameters that do not get touched on a sparse update
int time = 0;
double[] errors = new double[batch];
final boolean sparseUpdates;
{
int sparse = 0;
for (int i = 0; i < dataSet.size(); i++)
if(dataSet.getDataPoint(i).getNumericalValues().isSparse())
sparse++;
if(sparse > dataSet.size()/4)
sparseUpdates = true;
else
sparseUpdates = false;
}
int[] lastTime = sparseUpdates ? new int[w.length()] : null;
for(int epoch = 0; epoch < epochs; epoch++)
{
Collections.shuffle(sample);
final double alpha = learningDecay.rate(epoch, epochs, learningRate)/batch;
final double alphaReg = alpha*lambda;
for(int i = 0; i < sample.size(); i+= batch)
{
if(i+batch > sample.size())
break;//not enough points left for a full batch
time++;
//get errors
for(int b = i; b < i+batch; b++)
errors[b-i] = regress(dataSet.getDataPoint(sample.get(b)))-dataSet.getTargetValue(sample.get(b));
//perform updates
for(int b = i; b < i+batch; b++)
{
final double error = errors[b-i];
final double alphaError = alpha*error;
//update bias
bias -= alphaError;
Vec x = dataSet.getDataPoint(sample.get(b)).getNumericalValues();
if(sparseUpdates)
{
for(IndexValue iv : x)
{
int idx = iv.getIndex();
if(lastTime[idx] != time)//update the theta for all missed updates
{
double theta_idx = w.get(idx);
w.set(idx, theta_idx*Math.pow(1-alphaReg, time-lastTime[idx]));
lastTime[idx] = time;
}
//now accumulate errors
w.increment(idx, -alphaError*iv.getValue());
}
}
else//dense updates, no need to track last time we updated weight values
{
if(b == i)//update on first access
w.mutableMultiply(1-alphaReg);
//add error
w.mutableSubtract(alphaError, x);
}
}
}
/*
* if sparse, accumulate missing weight updates due to
* regularization. If the learning rate changes, the weights must be
* updated at the end of every epoch. If the learning rate is
* constant, we only have to update on the last epoch
*/
if (sparseUpdates && ( !(learningDecay instanceof NoDecay) || (epoch == epochs-1) ))
{
for (int idx = 0; idx < w.length(); idx++)
{
if (lastTime[idx] != time)//update the theta for all missed updates
{
double theta_idx = w.get(idx);
w.set(idx, theta_idx * Math.pow(1 - alphaReg, time - lastTime[idx]));
lastTime[idx] = time;
}
}
}
}
}
@Override
public boolean supportsWeightedData()
{
return false;
}
@Override
public StochasticRidgeRegression clone()
{
StochasticRidgeRegression clone = new StochasticRidgeRegression(lambda, epochs, batchSize, learningRate, learningDecay);
if(this.w != null)
clone.w = this.w.clone();
clone.bias = this.bias;
return clone;
}
}
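/*
 * A short usage sketch added for illustration; it is not part of the original
 * source. All hyper-parameter values are arbitrary examples, "data" is assumed
 * to be a large (possibly sparse) RegressionDataSet, and LinearDecay is used
 * only to show that a decay rate can be supplied.
 *
 * StochasticRidgeRegression srr = new StochasticRidgeRegression(1e-4, 20, 128, 0.01, new LinearDecay());
 * srr.train(data);
 * Vec learnedWeights = srr.getRawWeight();
 * double prediction = srr.regress(data.getDataPoint(0));
 */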
| 10,663 | 30.272727 | 128 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/UpdateableRegressor.java | package jsat.regression;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.exceptions.FailedToFitException;
/**
* UpdateableRegressor is an interface for one type of Online learner. The main
* characteristic of an online learner is that new example points can be added
* incrementally after the regressor was initially trained, or as part of its
* initial training. <br>
* Some Online learners behave differently in when they are updated. The
* UpdateableRegressor is an online learner that specifically only performs
* additional learning when a new example is provided via the
* {@link #update(jsat.classifiers.DataPoint, double) } method.
* <br>
* The standard behavior for an UpdateableRegressor is that the user first
 * calls {@link #train(jsat.regression.RegressionDataSet) } to train
 * the regressor, or {@link #setUp(jsat.classifiers.CategoricalData[], int) }
* to prepare for online updates. Once one
* of these is called, it should then be safe to call
* {@link #update(jsat.classifiers.DataPoint, double) } without getting a
* {@link FailedToFitException}. Some online learners may require one of the
* train methods to be called first.
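 * <br><br>
 * A minimal usage sketch (hypothetical; {@code SomeUpdateableRegressor} and
 * {@code targetFor} stand in for a concrete implementation and the caller's
 * own labeling logic, they are not JSAT classes):
 * <pre>{@code
 * UpdateableRegressor reg = new SomeUpdateableRegressor();
 * reg.setUp(new CategoricalData[0], numNumericFeatures);
 * for (DataPoint dp : incomingPoints)
 *     reg.update(dp, targetFor(dp));
 * double prediction = reg.regress(somePoint);
 * }</pre>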
*
* @author Edward Raff
*/
public interface UpdateableRegressor extends Regressor
{
/**
     * Prepares the regressor to begin learning from its
* {@link #update(jsat.classifiers.DataPoint, double) } method.
*
* @param categoricalAttributes an array containing the categorical
* attributes that will be in each data point
* @param numericAttributes the number of numeric attributes that will be in
* each data point
*/
public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes);
/**
     * Updates the regressor by giving it a new data point to learn from.
* @param dataPoint the data point to learn
* @param weight weight of the data point to use in update
* @param targetValue the target value of the data point
*/
public void update(DataPoint dataPoint, double weight, double targetValue);
/**
     * Updates the regressor by giving it a new data point to learn from.
* @param dataPoint the data point to learn
* @param targetValue the target value of the data point
*/
default public void update(DataPoint dataPoint, double targetValue)
{
update(dataPoint, 1.0, targetValue);
}
@Override
public UpdateableRegressor clone();
}
| 2,518 | 39.629032 | 86 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/WarmRegressor.java | package jsat.regression;
import java.util.concurrent.ExecutorService;
/**
* This interface is meant for models that support efficient warm starting from
* the solution of a previous model. Training with a warm start means that
* instead of solving the problem from scratch, the code can use a previous
* solution to start closer towards its goal. <br>
* <br>
 * Some algorithms may be able to warm start from solutions of the same form,
* even if they were trained by a different algorithm. Other algorithms may only
* be able to warm start from the same algorithm. There may also be restrictions
* that the warm start can only be from a solution trained on the exact same
* data set. The latter case is indicated by the {@link #warmFromSameDataOnly()}
* method. <br>
* <br>
* Just because a regressor fits the type that the warm start interface states
 * doesn't mean that it is a valid regressor to warm start from. <i>Regressors
* of the same class trained on the same data must <b>always</b> be valid to
* warm start from. </i>
* <br>
* <br>
* Note: The use of this class is still under development, and may change in the
* future.
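 * <br><br>
 * A minimal warm-start sketch (hypothetical; {@code SomeWarmRegressor} stands
 * in for any concrete implementation, and the two regularization values are
 * only example parameters):
 * <pre>{@code
 * WarmRegressor first = new SomeWarmRegressor(1e-2);
 * first.train(trainData);
 * WarmRegressor second = new SomeWarmRegressor(1e-3);
 * second.train(trainData, first); //starts from the previous solution
 * }</pre>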
*
* @author Edward Raff
*/
public interface WarmRegressor extends Regressor
{
/**
* Some models can only be warm started from a solution trained on the
* exact same data set as the model it is warm starting from. If this is the
* case {@code true} will be returned. The behavior for training on a
     * different data set when this is the case is undefined. It may cause an
* error, or it may cause the algorithm to take longer or reach a worse
* solution. <br>
* When {@code true}, it is important that the data set be unaltered - this
* includes mutating the values stored or re-arranging the data points
* within the data set.
*
* @return {@code true} if the algorithm can only be warm started from the
* model trained on the exact same data set.
*/
public boolean warmFromSameDataOnly();
/**
* Trains the regressor and constructs a model for regression using the
     * given data set. If the training method knows how, and {@code parallel}
     * is {@code true}, it will use multiple threads to conduct training in
     * parallel. This method will
* block until the training has completed.
*
* @param dataSet the data set to train on
* @param warmSolution the solution to use to warm start this model
* @param parallel {@code true} if the training should be done using
* multiple-cores, {@code false} for single threaded.
*/
public void train(RegressionDataSet dataSet, Regressor warmSolution, boolean parallel);
/**
* Trains the regressor and constructs a model for regression using the
* given data set.
*
* @param dataSet the data set to train on
* @param warmSolution the solution to use to warm start this model
*/
default public void train(RegressionDataSet dataSet, Regressor warmSolution)
{
train(dataSet, warmSolution, false);
}
}
| 3,072 | 40.527027 | 91 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/evaluation/CoefficientOfDetermination.java | package jsat.regression.evaluation;
/**
 * Uses the Coefficient of Determination, also known as R<sup>2</sup>, as an
 * evaluation score. A perfect fit receives a score of 1, and worse fits
 * receive lower scores.
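 * <br><br>
 * The score is computed as R<sup>2</sup> = 1 - SS<sub>res</sub>/SS<sub>tot</sub>,
 * where SS<sub>res</sub> is the sum of squared differences between the
 * predictions and the true values, and SS<sub>tot</sub> is the sum of squared
 * differences between the true values and their mean.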
*
* @author Edward Raff
*/
public class CoefficientOfDetermination extends TotalHistoryRegressionScore
{
private static final long serialVersionUID = 1215708502913888821L;
/**
* Creates a new Coefficient of Determination object
*/
public CoefficientOfDetermination()
{
super();
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public CoefficientOfDetermination(CoefficientOfDetermination toCopy)
{
super(toCopy);
}
@Override
public double getScore()
{
double trueMean = truths.getVecView().mean();
double numer = 0, denom = 0;
for(int i = 0; i < truths.size(); i++)
{
numer += Math.pow(predictions.getD(i)-truths.getD(i), 2);
denom += Math.pow(trueMean-truths.getD(i), 2);
}
return 1-numer/denom;
}
@Override
public boolean lowerIsBetter()
{
        return false;//a higher R^2 indicates a better fit
}
@Override
public CoefficientOfDetermination clone()
{
return new CoefficientOfDetermination(this);
}
@Override
public int hashCode()
{//XXX this is a strange hashcode method
return getName().hashCode();
}
@Override
public boolean equals(Object obj)
    {
        if(obj == null)
            return false;
if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass()))
{
return true;
}
return false;
}
@Override
public String getName()
{
return "Coefficient of Determination";
}
}
| 1,784 | 21.594937 | 112 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/evaluation/MeanAbsoluteError.java | package jsat.regression.evaluation;
import jsat.math.OnLineStatistics;
/**
* Uses the Mean of Absolute Errors between the predictions and the true values.
*
* @author Edward Raff
*/
public class MeanAbsoluteError implements RegressionScore
{
private static final long serialVersionUID = -637676526509989776L;
private OnLineStatistics absError;
public MeanAbsoluteError()
{
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public MeanAbsoluteError(MeanAbsoluteError toCopy)
{
if(toCopy.absError != null)
this.absError = toCopy.absError.clone();
}
@Override
public void prepare()
{
absError = new OnLineStatistics();
}
@Override
public void addResult(double prediction, double trueValue, double weight)
{
if(absError == null)
throw new RuntimeException("regression score has not been initialized");
absError.add(Math.abs(prediction-trueValue), weight);
}
@Override
public void addResults(RegressionScore other)
{
MeanAbsoluteError otherObj = (MeanAbsoluteError) other;
if(otherObj.absError != null)
this.absError.add(otherObj.absError);
}
@Override
public double getScore()
{
return absError.getMean();
}
@Override
public boolean lowerIsBetter()
{
return true;
}
@Override
public MeanAbsoluteError clone()
{
return new MeanAbsoluteError(this);
}
@Override
public int hashCode()
{//XXX this is a strange hashcode method
return getName().hashCode();
}
@Override
public boolean equals(Object obj)
    {
        if(obj == null)
            return false;
if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass()))
{
return true;
}
return false;
}
@Override
public String getName()
{
return "Mean Absolute Error";
}
}
| 2,064 | 21.204301 | 112 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/evaluation/MeanSquaredError.java | package jsat.regression.evaluation;
import jsat.math.OnLineStatistics;
/**
* Uses the Mean of the Squared Errors between the predictions and the true
* values.
*
* @author Edward Raff
*/
public class MeanSquaredError implements RegressionScore
{
private static final long serialVersionUID = 3655567184376550126L;
private OnLineStatistics meanError;
private boolean rmse;
public MeanSquaredError()
{
this(false);
}
public MeanSquaredError(boolean rmse)
{
setRMSE(rmse);
}
public void setRMSE(boolean rmse)
{
this.rmse = rmse;
}
public boolean isRMSE()
{
return rmse;
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public MeanSquaredError(MeanSquaredError toCopy)
{
if(toCopy.meanError != null)
this.meanError = toCopy.meanError.clone();
this.rmse = toCopy.rmse;
}
@Override
public void prepare()
{
meanError = new OnLineStatistics();
}
@Override
public void addResult(double prediction, double trueValue, double weight)
{
if(meanError == null)
throw new RuntimeException("regression score has not been initialized");
meanError.add(Math.pow(prediction-trueValue, 2), weight);
}
@Override
public void addResults(RegressionScore other)
{
MeanSquaredError otherObj = (MeanSquaredError) other;
if(otherObj.meanError != null)
this.meanError.add(otherObj.meanError);
}
@Override
public double getScore()
{
if(rmse)
return Math.sqrt(meanError.getMean());
else
return meanError.getMean();
}
@Override
public boolean lowerIsBetter()
{
return true;
}
@Override
public int hashCode()
{//XXX this is a strange hashcode method
return getName().hashCode();
}
@Override
public boolean equals(Object obj)
    {
        if(obj == null)
            return false;
if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass()))
{
return this.rmse == ((MeanSquaredError)obj).rmse;
}
return false;
}
@Override
public MeanSquaredError clone()
{
return new MeanSquaredError(this);
}
@Override
public String getName()
{
String prefix = rmse ? "Root " : "";
return prefix + "Mean Squared Error";
}
}
| 2,552 | 21.008621 | 112 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/evaluation/RegressionScore.java | package jsat.regression.evaluation;
import java.io.Serializable;
/**
* This interface defines the contract for evaluating or "scoring" the results
* on a regression problem. <br>
* <br>
* All regression scores must override the {@link #equals(java.lang.Object)}
* and {@link #hashCode() } methods. If a score has parameters, different
* objects with different parameters must not be equal. However, different
* objects with the same parameters must be equal <i>even if their internal
* states are different</i>
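 * <br><br>
 * A typical usage sketch (assuming {@code predictions} and {@code truths} are
 * arrays the caller already has, and using unit weights):
 * <pre>{@code
 * RegressionScore score = new MeanSquaredError();
 * score.prepare();
 * for (int i = 0; i < predictions.length; i++)
 *     score.addResult(predictions[i], truths[i], 1.0);
 * double result = score.getScore();
 * }</pre>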
*
* @author Edward Raff
*/
public interface RegressionScore extends Serializable
{
public void prepare();
/**
* Adds the given result to the score
* @param prediction the prediction for the data point
* @param trueValue the true value for the data point
* @param weight the weigh to assign to the data point
*/
public void addResult(double prediction, double trueValue, double weight);
/**
* The score contained in <i>this</i> object is augmented with the results
* already accumulated in the {@code other} object. This does not result in
* an averaging, but alters the current object to have the same score it
* would have had if all the results were originally inserted into <i>this
* </i> object. <br>
* <br>
* This method is only required to work if {@code other} if of the same
* class as {@code this} object.
*
* @param other the object to add the results from
*/
public void addResults(RegressionScore other);
/**
* Computes the score for the results that have been enrolled via
* {@link #addResult(double, double, double) }
*
* @return the score for the current results
*/
public double getScore();
/**
* Returns {@code true} if a lower score is better, or {@code false} if a
* higher score is better
* @return {@code true} if a lower score is better
*/
public boolean lowerIsBetter();
@Override
public boolean equals(Object obj);
@Override
public int hashCode();
public RegressionScore clone();
/**
* Returns the name to present for this score
* @return the score name
*/
public String getName();
}
| 2,288 | 30.791667 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/evaluation/RelativeAbsoluteError.java | package jsat.regression.evaluation;
/**
* Uses the Sum of Absolute Errors divided by the sum of the absolute value of
* the true values subtracted from their mean. This produces an error metric
* that has no units.
*
* @author Edward Raff
*/
public class RelativeAbsoluteError extends TotalHistoryRegressionScore
{
private static final long serialVersionUID = -6152988968756871647L;
/**
* Creates a new Relative Absolute Error evaluator
*/
public RelativeAbsoluteError()
{
super();
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public RelativeAbsoluteError(RelativeAbsoluteError toCopy)
{
super(toCopy);
}
@Override
public double getScore()
{
double trueMean = truths.getVecView().mean();
double numer = 0, denom = 0;
for(int i = 0; i < truths.size(); i++)
{
numer += Math.abs(predictions.getD(i)-truths.getD(i));
denom += Math.abs(trueMean-truths.getD(i));
}
return numer/denom;
}
@Override
public boolean lowerIsBetter()
{
return true;
}
@Override
public RelativeAbsoluteError clone()
{
return new RelativeAbsoluteError(this);
}
@Override
public int hashCode()
{//XXX this is a strange hashcode method
return getName().hashCode();
}
@Override
public boolean equals(Object obj)
    {
        if(obj == null)
            return false;
if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass()))
{
return true;
}
return false;
}
@Override
public String getName()
{
return "Relative Absolute Error";
}
}
| 1,812 | 21.6625 | 112 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/evaluation/RelativeSquaredError.java | package jsat.regression.evaluation;
/**
* Uses the Sum of Squared Errors divided by the sum of the squared true values
* subtracted from their mean. This produces an error metric that has no units.
*
* @author Edward Raff
*/
public class RelativeSquaredError extends TotalHistoryRegressionScore
{
private static final long serialVersionUID = 8377798320269626429L;
/**
* Creates a new Relative Squared Error object
*/
public RelativeSquaredError()
{
super();
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public RelativeSquaredError(RelativeSquaredError toCopy)
{
super(toCopy);
}
@Override
public double getScore()
{
double trueMean = truths.getVecView().mean();
double numer = 0, denom = 0;
for(int i = 0; i < truths.size(); i++)
{
numer += Math.pow(predictions.getD(i)-truths.getD(i), 2);
denom += Math.pow(trueMean-truths.getD(i), 2);
}
return numer/denom;
}
@Override
public boolean lowerIsBetter()
{
return true;
}
@Override
public RelativeSquaredError clone()
{
return new RelativeSquaredError(this);
}
@Override
public int hashCode()
{//XXX this is a strange hashcode method
return getName().hashCode();
}
@Override
public boolean equals(Object obj)
    {
        if(obj == null)
            return false;
if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass()))
{
return true;
}
return false;
}
@Override
public String getName()
{
return "Relative Squared Error";
}
}
| 1,787 | 21.632911 | 112 | java |
JSAT | JSAT-master/JSAT/src/jsat/regression/evaluation/TotalHistoryRegressionScore.java | package jsat.regression.evaluation;
import jsat.utils.DoubleList;
/**
* This abstract class provides the work for maintaining the history of
* predictions and their true values.
*
* @author Edward Raff
*/
public abstract class TotalHistoryRegressionScore implements RegressionScore
{
private static final long serialVersionUID = -5262934560490160236L;
/**
* List of the true target values
*/
protected DoubleList truths;
/**
* List of the predict values for each target
*/
protected DoubleList predictions;
/**
* The weight of importance for each point
*/
protected DoubleList weights;
public TotalHistoryRegressionScore()
{
}
/**
* Copy constructor
* @param toCopy the object to copy
*/
public TotalHistoryRegressionScore(TotalHistoryRegressionScore toCopy)
{
if(toCopy.truths != null)
{
this.truths = new DoubleList(toCopy.truths);
this.predictions = new DoubleList(toCopy.predictions);
this.weights = new DoubleList(toCopy.weights);
}
}
@Override
public void prepare()
{
truths = new DoubleList();
predictions = new DoubleList();
weights = new DoubleList();
}
@Override
public void addResult(double prediction, double trueValue, double weight)
{
truths.add(trueValue);
predictions.add(prediction);
weights.add(weight);
}
@Override
public void addResults(RegressionScore other)
{
TotalHistoryRegressionScore otherObj = (TotalHistoryRegressionScore) other;
this.truths.addAll(otherObj.truths);
this.predictions.addAll(otherObj.predictions);
this.weights.addAll(otherObj.weights);
}
@Override
public abstract TotalHistoryRegressionScore clone();
}
| 1,865 | 23.88 | 83 | java |
JSAT | JSAT-master/JSAT/src/jsat/testing/StatisticTest.java |
package jsat.testing;
/**
*
* @author Edward Raff
*/
public interface StatisticTest
{
public enum H1
{
LESS_THAN
{
@Override
public String toString()
{
return "<";
}
},
GREATER_THAN
{
@Override
public String toString()
{
return ">";
}
},
NOT_EQUAL
{
@Override
public String toString()
{
return "\u2260";
}
}
};
/**
*
     * @return an array of the valid alternate hypotheses for this test
*/
public H1[] validAlternate();
public void setAltHypothesis(H1 h1);
/**
*
* @return a descriptive name for the statistical test
*/
public String testName();
public double pValue();
}
| 946 | 14.783333 | 71 | java |
JSAT | JSAT-master/JSAT/src/jsat/testing/goodnessoffit/KSTest.java |
package jsat.testing.goodnessoffit;
import jsat.distributions.ContinuousDistribution;
import jsat.distributions.Kolmogorov;
import jsat.linear.Vec;
/**
*
* @author Edward Raff
*/
public class KSTest
{
private static final Kolmogorov k = new Kolmogorov();
private Vec v;
/**
     * Creates a new statistical test, with <tt>v</tt> being the first sample.
     * The 1 sample test compares the data to a given distribution, and tests
     * whether the data does not belong to that distribution. The 2 sample test
     * is designed to tell if the data is not from the same population as a
     * second sample.
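     * <br><br>
     * A small usage sketch (the normal distribution and the second sample are
     * assumptions for illustration only):
     * <pre>{@code
     * KSTest test = new KSTest(sampleVec);
     * double pOneSample = test.testDist(new Normal(0, 1));
     * double pTwoSample = test.testData(otherSampleVec);
     * }</pre>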
*
     * @param v the data to be one of the samples
*/
public KSTest(Vec v)
{
this.v = v.sortedCopy();
}
/**
* Change the original sample to <tt>v</tt>
* @param v the new original sample.
*/
public void setBaseData(Vec v)
{
this.v = v;
}
/**
     * Calculates the D statistic for comparison against a continuous distribution
* @param cd the distribution to compare against
* @return the max difference between the empirical CDF and the 'true' CDF of the given distribution
*/
protected double dCalc(ContinuousDistribution cd)
{
double max = 0;
for(int i = 0; i < v.length(); i++)
{
//ECDF(x) - F(x)
if(v.get(i) >= cd.min() && v.get(i) <= cd.max() )
{
double tmp = (i+1.0)/v.length() - cd.cdf(v.get(i));
max = Math.max(max, Math.abs(tmp));
}
            else//The data does not fit in the range of the distribution
{
max = Math.max(max, Math.abs((i+1.0)/v.length()));
}
}
return max;
}
private static double ECDF(Vec s, double x)
{
int min = 0;
int max = s.length()-1;
int mid = (min+max) /2;
do
{
if(x > s.get(mid))
min = mid+1;
else
max = mid-1;
}
while(s.get(mid) != x && min <= max);
return (mid+1.0)/s.length();
}
/**
* Calculates the D statistic for comparison against another data set
* @param o the other data set
     * @return the max difference between the empirical CDFs of the two samples.
*/
protected double dCaldO(Vec o)
{
double max = 0;
for(int i = 0; i < v.length(); i++)
{
//ECDF(x) - F(x)
double tmp = (i+1.0)/v.length() - ECDF(o, v.get(i));
max = Math.max(max, Math.abs(tmp));
}
for(int i = 0; i < o.length(); i++)
{
//ECDF(x) - F(x)
double tmp = (i+1.0)/o.length() - ECDF(v, o.get(i));
max = Math.max(max, Math.abs(tmp));
}
return max;
}
/**
* Returns the p-value for the KS Test against the given distribution <tt>cd</tt>. <br>
* The null hypothesis of this test is that the given data set belongs to the given distribution. <br>
* The alternative hypothesis is that the data set does not belong to the given distribution.
*
*
* @param cd the distribution to compare against
* @return the p-value of the test against this distribution
*/
public double testDist(ContinuousDistribution cd)
{
double d = dCalc(cd);
double n = v.length();
return pValue(n, d);
}
/**
* Returns the p-value for the 2 sample KS Test against the given data set <tt>data</tt>. <br>
* The null hypothesis of this test is that the given data set is from the same population as <tt>data</tt> <br>
* The alternative hypothesis is that the data set does not belong to the same population as <tt>data</tt>
* @param data the other distribution to compare against
* @return the p-value of the test against this data set
*/
public double testData(Vec data)
{
double d = dCaldO(data);
double n = v.length()*data.length() / ((double) v.length() +data.length());
return pValue(n, d);
}
private double pValue(double n, double d)
{
return 1 - k.cdf( (Math.sqrt(n) + 0.12 + 0.11/Math.sqrt(n)) * d);
}
}
| 4,374 | 27.782895 | 116 | java |
JSAT | JSAT-master/JSAT/src/jsat/testing/onesample/OneSampleTest.java |
package jsat.testing.onesample;
import jsat.linear.Vec;
import jsat.testing.StatisticTest;
/**
*
* @author Edward Raff
*/
public interface OneSampleTest extends StatisticTest
{
/**
* Sets the statistics that will be tested against an alternate hypothesis.
*
     * @param data the sample data used to compute the test statistics
*/
public void setTestUsingData(Vec data);
public String[] getTestVars();
public void setTestVars(double[] testVars);
public String getAltVar();
public void setAltVar(double altVar);
public String getNullVar();
}
| 562 | 18.413793 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/testing/onesample/TTest.java |
package jsat.testing.onesample;
import jsat.distributions.StudentT;
import jsat.linear.Vec;
import jsat.text.GreekLetters;
/**
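 * Provides the one-sample T-Test, which tests whether the mean of a sample
 * differs from a hypothesized mean when the population standard deviation is
 * unknown.
 * <br><br>
 * A small usage sketch (the hypothesized mean of 5.0 and {@code dataVec} are
 * example values only):
 * <pre>{@code
 * TTest test = new TTest(StatisticTest.H1.GREATER_THAN, 5.0, dataVec);
 * double p = test.pValue();
 * }</pre>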
*
* @author Edward Raff
*/
public class TTest implements OneSampleTest
{
private StudentT tDist;
private H1 h1;
private double hypothMean;
private double sampleMean;
private double sampleDev;
private double sampleSize;
public TTest(H1 h1, double hypothMean, double sampleMean, double sampleDev, double sampleSize)
{
this.h1 = h1;
this.hypothMean = hypothMean;
this.sampleMean = sampleMean;
this.sampleDev = sampleDev;
this.sampleSize = sampleSize;
tDist = new StudentT(sampleSize-1);
}
public TTest(double hypothMean, double sampleMean, double sampleDev, double sampleSize)
{
this(H1.NOT_EQUAL, hypothMean, sampleMean, sampleDev, sampleSize);
}
public TTest(H1 h1, double hypothMean, Vec data)
{
this(h1, hypothMean, data.mean(), data.standardDeviation(), data.length());
}
public TTest()
{
this(1, 2, 2, 2);
}
public void setTestUsingData(Vec data)
{
this.sampleMean = data.mean();
this.sampleDev = data.standardDeviation();
this.sampleSize = data.length();
tDist.setDf(sampleSize-1);
}
public String[] getTestVars()
{
return new String[]
{
GreekLetters.bar("x"),
GreekLetters.sigma,
"n"
};
}
public void setTestVars(double[] testVars)
{
this.sampleMean = testVars[0];
this.sampleDev = testVars[1];
this.sampleSize = testVars[2];
tDist.setDf(sampleSize-1);
}
public String getAltVar()
{
return GreekLetters.mu + "0";
}
public void setAltVar(double altVar)
{
hypothMean = altVar;
}
public String getNullVar()
{
return GreekLetters.mu;
}
public H1[] validAlternate()
{
return new H1[]
{
H1.LESS_THAN, H1.NOT_EQUAL, H1.GREATER_THAN
};
}
public void setAltHypothesis(H1 h1)
{
this.h1 = h1;
}
public String testName()
{
return "T Test";
}
public double pValue()
{
double tScore = (sampleMean - hypothMean)*Math.sqrt(sampleSize)/sampleDev;
if(h1 == H1.NOT_EQUAL)
return tDist.cdf(-Math.abs(tScore))*2;
else if(h1 == H1.LESS_THAN)
return tDist.cdf(tScore);
else
return 1-tDist.cdf(tScore);
}
}
| 2,669 | 20.532258 | 98 | java |
JSAT | JSAT-master/JSAT/src/jsat/testing/onesample/ZTest.java |
package jsat.testing.onesample;
import jsat.distributions.Normal;
import jsat.linear.Vec;
import jsat.text.GreekLetters;
/**
*
* @author Edward Raff
*/
public class ZTest implements OneSampleTest
{
private Normal norm;
private double sampleMean;
private double sampleDev;
private int sampleSize;
private H1 h1;
/**
* The mean of the null hypothesis
*/
private double hypoMean;
public ZTest()
{
this(0, 1, 1);
}
public ZTest(double sampleMean, double sampleDev, int sampleSize)
{
this(H1.NOT_EQUAL, sampleMean, sampleDev, sampleSize);
}
public ZTest(H1 h1, double sampleMean, double sampleDev, int sampleSize)
{
this.h1 = h1;
this.hypoMean = 0;
this.sampleMean = sampleMean;
this.sampleDev = sampleDev;
this.sampleSize = sampleSize;
this.norm = new Normal();
}
public ZTest(Vec data)
{
this(data.mean(), data.standardDeviation(), data.length());
}
public ZTest(H1 h1, Vec data)
{
this(h1, data.mean(), data.standardDeviation(), data.length());
}
public H1[] validAlternate()
{
return new H1[]
{
H1.LESS_THAN, H1.NOT_EQUAL, H1.GREATER_THAN
};
}
public String testName()
{
return "One Sample Z-Test";
}
public void setTestUsingData(Vec data)
{
this.sampleMean = data.mean();
this.sampleDev = data.standardDeviation();
this.sampleSize = data.length();
}
public String[] getTestVars()
{
return new String[]{GreekLetters.bar("x"), GreekLetters.sigma, "n"};
}
public void setTestVars(double[] testVars)
{
this.sampleMean = testVars[0];
this.sampleDev = testVars[1];
this.sampleSize = (int) testVars[2];
}
public String getAltVar()
{
return GreekLetters.mu + "0";
}
public void setAltVar(double altVar)
{
this.hypoMean = altVar;
}
public double pValue()
{
double se = sampleDev/Math.sqrt(sampleSize);
double zScore = (sampleMean-hypoMean)/se;
if(h1 == H1.NOT_EQUAL)
return norm.cdf(-Math.abs(zScore))*2;
else if(h1 == H1.LESS_THAN)
return norm.cdf(zScore);
else
return 1-norm.cdf(zScore);
}
public void setAltHypothesis(H1 h1)
{
this.h1 = h1;
}
public String getNullVar()
{
return GreekLetters.mu;
}
}
| 2,606 | 19.527559 | 76 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/BasicTextVectorCreator.java | package jsat.text;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import jsat.linear.SparseVector;
import jsat.linear.Vec;
import jsat.text.tokenizer.Tokenizer;
import jsat.text.wordweighting.WordWeighting;
/**
* Creates new text vectors from a dictionary of known tokens and a word
* weighting scheme. <br>
* <br>
* This object is generally intended to be constructed by a
* {@link TextDataLoader}, though can be used if you know all the words you will
* need (and can initialize the {@link WordWeighting}) before creating this
* object.
*
* @author Edward Raff
*/
public class BasicTextVectorCreator implements TextVectorCreator
{
private static final long serialVersionUID = -8620485679300539556L;
private final Tokenizer tokenizer;
private final Map<String, Integer> wordIndex;
private final WordWeighting weighting;
/**
* Creates a new basic text vector creator
* @param tokenizer the tokenizer to apply to incoming strings
* @param wordIndex the map of each known word to its index, the size of the
* map indicating the maximum (exclusive) index
* @param weighting the weighting process to apply to each loaded document.
* This should have already been initialized, or be stateless.
*/
public BasicTextVectorCreator(Tokenizer tokenizer, Map<String, Integer> wordIndex, WordWeighting weighting)
{
this.tokenizer = tokenizer;
this.wordIndex = wordIndex;
this.weighting = weighting;
}
@Override
public Vec newText(String text)
{
return newText(text, new StringBuilder(), new ArrayList<String>());
}
@Override
public Vec newText(String input, StringBuilder workSpace, List<String> storageSpace)
{
tokenizer.tokenize(input, workSpace, storageSpace);
SparseVector vec = new SparseVector(wordIndex.size());
for( String word : storageSpace)
{
if(wordIndex.containsKey(word))//Could also call retainAll on words before looping. Worth while to investigate
{
int index = wordIndex.get(word);
vec.increment(index, 1.0);
}
}
weighting.applyTo(vec);
return vec;
}
}
| 2,270 | 31.442857 | 123 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/ClassificationHashedTextDataLoader.java | package jsat.text;
import java.util.List;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.text.tokenizer.Tokenizer;
import jsat.text.wordweighting.WordWeighting;
import jsat.utils.IntList;
/**
* This class provides a framework for loading classification datasets made of
* text documents as hashed feature vectors. This extension uses
* {@link #addOriginalDocument(java.lang.String, int) } instead so that the
* original documents have a class label associated with them.
* {@link #getDataSet() } then returns a classification data set, where the
* class label for each data point is the label provided when
* {@code addOriginalDocument} was called.
* <br>
* New vectors created with {@link #newText(java.lang.String) } are inherently
* not part of the original data set, so do not need or receive a class label.
*
* @author Edward Raff
*/
public abstract class ClassificationHashedTextDataLoader extends HashedTextDataLoader
{
private static final long serialVersionUID = -1350008848821058696L;
/**
* The list of the true class labels for the data that was loaded before
* {@link #finishAdding() } was called.
*/
protected List<Integer> classLabels;
/**
* The information about the class label that would be predicted for a
* classification data set.
*/
protected CategoricalData labelInfo;
/**
* Creates an new hashed text data loader for classification problems, it
* uses a relatively large default size of 2<sup>22</sup> for the dimension
* of the space.
*
* @param tokenizer the tokenization method to break up strings with
* @param weighting the scheme to set the weights for feature vectors.
*/
public ClassificationHashedTextDataLoader(Tokenizer tokenizer, WordWeighting weighting)
{
this(1<<22, tokenizer, weighting);
}
/**
* Creates an new hashed text data loader for classification problems.
* @param dimensionSize the size of the hashed space to use.
* @param tokenizer the tokenization method to break up strings with
* @param weighting the scheme to set the weights for feature vectors.
*/
public ClassificationHashedTextDataLoader(int dimensionSize, Tokenizer tokenizer, WordWeighting weighting)
{
super(dimensionSize, tokenizer, weighting);
classLabels = new IntList();
}
/**
* The classification label data stored in {@link #labelInfo} must be set
* if the text loader is to return a classification data set. As such, this
     * abstract class exists to force the user to set it, so that it cannot be
     * forgotten. <br>
* This will be called in {@link #getDataSet() } just before
* {@link #initialLoad() } is called.
*/
protected abstract void setLabelInfo();
/**
* Should use {@link #addOriginalDocument(java.lang.String, int) } instead.
* @param text the text of the data to add
* @return the index of the created document for the given text. Starts from
* zero and counts up.
*/
@Override
protected int addOriginalDocument(String text)
{
throw new UnsupportedOperationException("addOriginalDocument(String"
+ " text, int label) should be used instead");
}
/**
* To be called by the {@link #initialLoad() } method.
* It will take in the text and add a new document
* vector to the data set. Once all text documents
* have been loaded, this method should never be
* called again. <br>
* This method is thread safe.
*
* @param text the text of the document to add
* @param label the classification label for this document
* @return the index of the created document for the given text. Starts from
* zero and counts up.
*/
protected int addOriginalDocument(String text, int label)
{
if(label >= labelInfo.getNumOfCategories())
throw new RuntimeException("Invalid label given");
int index = super.addOriginalDocument(text);
synchronized(classLabels)
{
while(classLabels.size() < index)
classLabels.add(-1);
if(classLabels.size() == index)//we are where we expect
classLabels.add(label);
else//another thread beat us to the addition
classLabels.set(index, label);
}
return index;
}
@Override
public ClassificationDataSet getDataSet()
{
if(!noMoreAdding)
{
setLabelInfo();
initialLoad();
finishAdding();
}
ClassificationDataSet cds =
new ClassificationDataSet(vectors.get(0).length(),
new CategoricalData[]{}, labelInfo);
for(int i = 0; i < vectors.size(); i++)
cds.addDataPoint(vectors.get(i), new int[]{}, classLabels.get(i));
return cds;
}
}
| 5,044 | 35.824818 | 110 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/ClassificationTextDataLoader.java | package jsat.text;
import java.util.List;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.text.tokenizer.Tokenizer;
import jsat.text.wordweighting.WordWeighting;
import jsat.utils.IntList;
/**
* This class provides a framework for loading classification datasets made of
* text documents as vectors. This extension uses
* {@link #addOriginalDocument(java.lang.String, int) } instead so that the
* original documents have a class label associated with them.
* {@link #getDataSet() } then returns a classification data set, where the
* class label for each data point is the label provided when
* <tt>addOriginalDocument</tt> was called.
* <br>
* New vectors created with {@link #newText(java.lang.String) } are inherently
* not part of the original data set, so do not need or receive a class label.
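 * <br><br>
 * A minimal subclass sketch (the tokenizer, weighting, corpus, and label
 * values are placeholder assumptions):
 * <pre>{@code
 * ClassificationTextDataLoader loader =
 *         new ClassificationTextDataLoader(tokenizer, weighting)
 * {
 *     protected void setLabelInfo()
 *     {
 *         labelInfo = new CategoricalData(2); //e.g., two document classes
 *     }
 *     public void initialLoad()
 *     {
 *         for (int i = 0; i < rawTexts.size(); i++)
 *             addOriginalDocument(rawTexts.get(i), rawLabels.get(i));
 *     }
 * };
 * ClassificationDataSet cds = loader.getDataSet();
 * }</pre>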
*
* @author Edward Raff
*/
public abstract class ClassificationTextDataLoader extends TextDataLoader
{
private static final long serialVersionUID = -3826551504785236576L;
/**
* The list of the true class labels for the data that was loaded before
* {@link #finishAdding() } was called.
*/
protected final List<Integer> classLabels;
/**
* The information about the class label that would be predicted for a
* classification data set.
*/
protected CategoricalData labelInfo;
/**
* Creates a new text data loader
*
* @param tokenizer the string tokenizer to use on each input
* @param weighting the weighting scheme to apply to each vector in the
* collection
*/
public ClassificationTextDataLoader(Tokenizer tokenizer, WordWeighting weighting)
{
super(tokenizer, weighting);
classLabels = new IntList();
}
/**
* The classification label data stored in {@link #labelInfo} must be set
* if the text loader is to return a classification data set. As such, this
     * abstract class exists to force the user to set it, so that it cannot be
     * forgotten. <br>
* This will be called in {@link #getDataSet() } just before
* {@link #initialLoad() } is called.
*/
protected abstract void setLabelInfo();
/**
* Should use {@link #addOriginalDocument(java.lang.String, int) } instead.
* @param text the text of the data to add
* @return the index of the created document for the given text. Starts from
* zero and counts up.
*/
@Override
protected int addOriginalDocument(String text)
{
throw new UnsupportedOperationException("addOriginalDocument(String"
+ " text, int label) should be used instead");
}
/**
* To be called by the {@link #initialLoad() } method.
* It will take in the text and add a new document
* vector to the data set. Once all text documents
* have been loaded, this method should never be
* called again. <br>
* This method is thread safe
*
* @param text the text of the document to add
* @param label the classification label for this document
* @return the index of the created document for the given text. Starts from
* zero and counts up.
*/
protected int addOriginalDocument(String text, int label)
{
if(label >= labelInfo.getNumOfCategories())
throw new RuntimeException("Invalid label given");
int index = super.addOriginalDocument(text);
synchronized(classLabels)
{
while(classLabels.size() < index)
classLabels.add(-1);
if(classLabels.size() == index)//we are where we expect
classLabels.add(label);
else//another thread beat us to the addition
classLabels.set(index, label);
}
return index;
}
@Override
public ClassificationDataSet getDataSet()
{
if(!noMoreAdding)
{
setLabelInfo();
initialLoad();
finishAdding();
}
ClassificationDataSet cds =
new ClassificationDataSet(vectors.get(0).length(),
new CategoricalData[]{}, labelInfo);
for(int i = 0; i < vectors.size(); i++)
cds.addDataPoint(vectors.get(i), new int[]{}, classLabels.get(i));
return cds;
}
}
| 4,402 | 33.944444 | 85 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/GreekLetters.java |
package jsat.text;
/**
*
* @author Edward Raff
*/
public class GreekLetters
{
public static final String alpha = "\u03B1";
public static final String beta = "\u03B2";
public static final String gamma = "\u03B3";
public static final String delta = "\u03B4";
public static final String epsilon = "\u03B5";
public static final String zeta = "\u03B6";
public static final String eta = "\u03B7";
public static final String theta = "\u03B8";
public static final String iota = "\u03B9";
public static final String kappa = "\u03BA";
public static final String lamda = "\u03BB";
public static final String mu = "\u03BC";
public static final String nu = "\u03BD";
public static final String xi = "\u03BE";
public static final String omicron = "\u03BF";
public static final String pi = "\u03C0";
public static final String rho = "\u03C1";
public static final String finalSigma = "\u03C2";
public static final String sigma = "\u03C3";
public static final String tau = "\u03C4";
public static final String upsilon = "\u03C5";
public static final String phi = "\u03C6";
public static final String chi = "\u03C7";
public static final String psi = "\u03C8";
public static final String omega = "\u03C9";
/**
     * Puts an overline on top of the string s.
     * @param s the string to put a line over
* @return the input with a line over
*/
public static String bar(String s)
{
return s + "\u0305";
}
}
| 1,537 | 31.723404 | 53 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/HashedTextDataLoader.java | package jsat.text;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicIntegerArray;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.linear.SparseVector;
import jsat.linear.Vec;
import jsat.text.tokenizer.Tokenizer;
import jsat.text.wordweighting.WordWeighting;
import jsat.utils.IntList;
/**
* This class provides a framework for loading datasets made of Text documents
* as hashed feature vectors. Text is broken up into a sequence of tokens using
* a {@link Tokenizer}, that must be provided. The weights used will be
* determined by some {@link WordWeighting word weighting scheme}. <br>
* The user adds documents to the initial dataset using the {@link #addOriginalDocument(java.lang.String)
* } method. The {@link #finishAdding() } must be called when no more documents
* are left to add, at which point class will take care of calling the {@link WordWeighting#setWeight(java.util.List, java.util.List)
* } method to configure the word weighting used with the original data
* added.<br>
* <br>
* After the initial dataset is loaded, new strings can be converted to vectors
* using the {@link #newText(java.lang.String) } method. This should only be
* called after {@link #finishAdding() }.<br>
* <br>
 * Instances of this class keep a reference to all originally added vectors.
 * To transform new texts into vectors without keeping references to all of the
 * original vectors, the {@link #getTextVectorCreator() } will return an object
 * that performs the transformation.
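 * <br><br>
 * A minimal subclass sketch (the tokenizer, weighting, and corpus source are
 * placeholder assumptions):
 * <pre>{@code
 * HashedTextDataLoader loader = new HashedTextDataLoader(1 << 20, tokenizer, weighting)
 * {
 *     protected void initialLoad()
 *     {
 *         for (String doc : rawTexts)
 *             addOriginalDocument(doc);
 *     }
 * };
 * DataSet data = loader.getDataSet();
 * Vec newDoc = loader.newText("an unseen document");
 * }</pre>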
*
* @author Edward Raff
*/
abstract public class HashedTextDataLoader implements TextVectorCreator
{
private static final long serialVersionUID = 8513621180409278670L;
private final int dimensionSize;
/**
* Tokenizer to apply to input strings
*/
private Tokenizer tokenizer;
private WordWeighting weighting;
/**
* List of original vectors
*/
protected List<SparseVector> vectors;
private AtomicIntegerArray termDocumentFrequencys;
protected boolean noMoreAdding;
private volatile int documents;
/**
* Temporary work space to use for tokenization
*/
protected ThreadLocal<StringBuilder> workSpace;
/**
* Temporary storage space to use for tokenization
*/
protected ThreadLocal<List<String>> storageSpace;
/**
* Temporary space to use when creating vectors
*/
protected ThreadLocal<Map<String, Integer>> wordCounts;
private TextVectorCreator tvc;
public HashedTextDataLoader(Tokenizer tokenizer, WordWeighting weighting)
{
this(1<<22, tokenizer, weighting);
}
public HashedTextDataLoader(int dimensionSize, Tokenizer tokenizer, WordWeighting weighting)
{
this.dimensionSize = dimensionSize;
this.tokenizer = tokenizer;
this.weighting = weighting;
this.termDocumentFrequencys = new AtomicIntegerArray(dimensionSize);
this.vectors = new ArrayList<SparseVector>();
this.tvc = new HashedTextVectorCreator(dimensionSize, tokenizer, weighting);
noMoreAdding = false;
this.workSpace = new ThreadLocal<StringBuilder>();
this.storageSpace = new ThreadLocal<List<String>>();
this.wordCounts = new ThreadLocal<Map<String, Integer>>();
}
/**
* This method will load all the text documents that make up the original
* data set from their source. For each document,
* {@link #addOriginalDocument(java.lang.String) } should be called with the
* text of the document. <br>
* This method will be called when {@link #getDataSet() } is called for the
* first time. <br>
* New document vectors can be obtained after loading by calling
* {@link #newText(java.lang.String) }.
*/
protected abstract void initialLoad();
/**
* To be called by the {@link #initialLoad() } method.
* It will take in the text and add a new document
* vector to the data set. Once all text documents
* have been loaded, this method should never be
* called again. <br>
* This method is thread safe.
*
* @param text the text of the document to add
* @return the index of the created document for the given text. Starts from
* zero and counts up.
*/
protected int addOriginalDocument(String text)
{
if(noMoreAdding)
throw new RuntimeException("Initial data set has been finalized");
StringBuilder localWorkSpace = workSpace.get();
List<String> localStorageSpace = storageSpace.get();
Map<String, Integer> localWordCounts = wordCounts.get();
if(localWorkSpace == null)
{
localWorkSpace = new StringBuilder();
localStorageSpace = new ArrayList<String>();
localWordCounts = new LinkedHashMap<String, Integer>();
workSpace.set(localWorkSpace);
storageSpace.set(localStorageSpace);
wordCounts.set(localWordCounts);
}
localWorkSpace.setLength(0);
localStorageSpace.clear();
tokenizer.tokenize(text, localWorkSpace, localStorageSpace);
for(String word : localStorageSpace)
{
Integer count = localWordCounts.get(word);
if(count == null)
localWordCounts.put(word, 1);
else
localWordCounts.put(word, count+1);
}
SparseVector vec = new SparseVector(dimensionSize, localWordCounts.size());
for(Iterator<Entry<String, Integer>> iter = localWordCounts.entrySet().iterator(); iter.hasNext();)
{
Entry<String, Integer> entry = iter.next();
String word = entry.getKey();
            //guard against Math.abs(Integer.MIN_VALUE), which is still negative
            int hash = word.hashCode();
            int index = hash == Integer.MIN_VALUE ? 0 : Math.abs(hash) % dimensionSize;
            vec.increment(index, entry.getValue());//increment so colliding words accumulate, matching newText's behavior
termDocumentFrequencys.addAndGet(index, entry.getValue());
iter.remove();
}
synchronized(vectors)
{
vectors.add(vec);
return documents++;
}
}
/**
* Once all original documents have been added, this method is called so
* that post processing steps can be applied.
*/
protected void finishAdding()
{
noMoreAdding = true;
workSpace = null;
storageSpace = null;
wordCounts = null;
final int[] frqs = new int[dimensionSize];
for(int i = 0; i < termDocumentFrequencys.length(); i++)
frqs[i] = termDocumentFrequencys.get(i);
weighting.setWeight(vectors, IntList.unmodifiableView(frqs, dimensionSize));
for(SparseVector vec : vectors)
weighting.applyTo(vec);
termDocumentFrequencys = null;
}
/**
* Returns a new data set containing the original data points that were
* loaded with this loader.
*
* @return an appropriate data set for this loader
*/
public DataSet getDataSet()
{
if(!noMoreAdding)
{
initialLoad();
finishAdding();
}
List<DataPoint> dataPoints= new ArrayList<DataPoint>(vectors.size());
for(SparseVector vec : vectors)
dataPoints.add(new DataPoint(vec, new int[0], new CategoricalData[0]));
return new SimpleDataSet(dataPoints);
}
@Override
public Vec newText(String input)
{
return getTextVectorCreator().newText(input);
}
@Override
public Vec newText(String input, StringBuilder workSpace, List<String> storageSpace)
{
return getTextVectorCreator().newText(input, workSpace, storageSpace);
}
/**
* Returns the {@link TextVectorCreator} used by this data loader to convert
* documents into vectors.
*
* @return the text vector creator used by this class
*/
public TextVectorCreator getTextVectorCreator()
{
if(!noMoreAdding)
throw new RuntimeException("Initial documents have not yet loaded");
return tvc;
}
}
| 8,488 | 34.970339 | 241 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/HashedTextVectorCreator.java | package jsat.text;
import java.util.ArrayList;
import java.util.List;
import jsat.linear.SparseVector;
import jsat.linear.Vec;
import jsat.text.tokenizer.Tokenizer;
import jsat.text.wordweighting.BinaryWordPresent;
import jsat.text.wordweighting.WordWeighting;
/**
* Hashed Text Vector Creator exists to convert a text string into a
* {@link Vec} using feature hashing. The {@link Tokenizer tokenization} and
* {@link WordWeighting word weighting} method must be provided and already set
* up. When constructed the user should make sure the
* {@link WordWeighting#setWeight(java.util.List, java.util.List) }
* method has already been called, or is a stateless weighting (such as
* {@link BinaryWordPresent}).
*
* @author Edward Raff
*/
public class HashedTextVectorCreator implements TextVectorCreator
{
private static final long serialVersionUID = 1081388790985568192L;
private int dimensionSize;
private Tokenizer tokenizer;
private WordWeighting weighting;
/**
* Creates a new text vector creator that works with hash-trick features
* @param dimensionSize the dimension size of the feature space
* @param tokenizer the tokenizer to apply to incoming strings
* @param weighting the weighting process to apply to each loaded document.
*/
public HashedTextVectorCreator(int dimensionSize, Tokenizer tokenizer, WordWeighting weighting)
{
if(dimensionSize <= 1)
            throw new ArithmeticException("Vector dimension must be greater than 1");
this.dimensionSize = dimensionSize;
this.tokenizer = tokenizer;
this.weighting = weighting;
}
@Override
public Vec newText(String input)
{
return newText(input, new StringBuilder(), new ArrayList<String>());
}
@Override
public Vec newText(String input, StringBuilder workSpace, List<String> storageSpace)
{
tokenizer.tokenize(input, workSpace, storageSpace);
SparseVector vec = new SparseVector(dimensionSize);
for(String word : storageSpace)
{
            //guard against Math.abs(Integer.MIN_VALUE), which is still negative
            int hash = word.hashCode();
            vec.increment(hash == Integer.MIN_VALUE ? 0 : Math.abs(hash) % dimensionSize, 1.0);
}
weighting.applyTo(vec);
return vec;
}
}
| 2,454 | 36.769231 | 241 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/TextDataLoader.java |
package jsat.text;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.SimpleDataSet;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.datatransform.RemoveAttributeTransform;
import jsat.linear.SparseVector;
import jsat.linear.Vec;
import jsat.text.tokenizer.Tokenizer;
import jsat.text.wordweighting.WordWeighting;
import jsat.utils.IntList;
import jsat.utils.IntSet;
/**
* This class provides a framework for loading datasets made of Text documents
* as vectors. Text is broken up into a sequence of tokens using a
* {@link Tokenizer}, that must be provided. The weights used will be determined
* by some {@link WordWeighting word weighting scheme}. <br>
* The user adds documents to the initial dataset using the {@link #addOriginalDocument(java.lang.String)
* } method. The {@link #finishAdding() } must be called when no more documents
* are left to add, at which point class will take care of calling the {@link WordWeighting#setWeight(java.util.List, java.util.List)
* } method to configure the word weighting used with the original data
* added.<br>
* <br>
* After the initial dataset is loaded, new strings can be converted to vectors
* using the {@link #newText(java.lang.String) } method. This should only be
* called after {@link #finishAdding() }.<br>
* <br>
 * Instances of this class keep a reference to all originally added vectors.
 * To transform new texts into vectors without keeping references to all of the
 * original vectors, the {@link #getTextVectorCreator() } will return an object
 * that performs the transformation.
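 * <br><br>
 * A minimal subclass sketch (the tokenizer, weighting, and corpus source are
 * placeholder assumptions; the minimum count of 3 is only an example):
 * <pre>{@code
 * TextDataLoader loader = new TextDataLoader(tokenizer, weighting)
 * {
 *     public void initialLoad()
 *     {
 *         for (String doc : rawTexts)
 *             addOriginalDocument(doc);
 *     }
 * };
 * DataSet data = loader.getDataSet();
 * //a transform that would drop tokens seen in fewer than 3 documents
 * RemoveAttributeTransform pruneRare = loader.getMinimumOccurrenceDTF(3);
 * }</pre>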
*
* @author Edward Raff
*/
public abstract class TextDataLoader implements TextVectorCreator
{
private static final long serialVersionUID = -657253682338792871L;
/**
* List of original vectors
*/
protected final List<SparseVector> vectors;
/**
* Tokenizer to apply to input strings
*/
protected Tokenizer tokenizer;
/**
* Maps words to their associated index in an array
*/
protected ConcurrentHashMap<String, Integer> wordIndex;
/**
* list of all word tokens encountered in order of first observation
*/
protected List<String> allWords;
/**
     * The map of counts of how many documents each word token has been seen in.
     * Key is the index of the word, value is the number of documents it
     * appeared in. Using a map instead of a list so that it can be updated in
     * an efficient, thread safe way
*/
protected ConcurrentHashMap<Integer, AtomicInteger> termDocumentFrequencys;
private WordWeighting weighting;
/**
* Temporary work space to use for tokenization
*/
protected ThreadLocal<StringBuilder> workSpace;
/**
* Temporary storage space to use for tokenization
*/
protected ThreadLocal<List<String>> storageSpace;
/**
* Temporary space to use when creating vectors
*/
protected ThreadLocal<Map<String, Integer>> wordCounts;
private TextVectorCreator tvc;
/**
* true when {@link #finishAdding() } is called, and no new original
* documents can be inserted
*/
protected boolean noMoreAdding;
private final AtomicInteger currentLength = new AtomicInteger(0);
private volatile int documents;
/**
* Creates a new loader for text datasets
* @param tokenizer the tokenization method to break up strings with
* @param weighting the scheme to set the weights for feature vectors.
*/
public TextDataLoader(Tokenizer tokenizer, WordWeighting weighting)
{
this.vectors = new ArrayList<SparseVector>();
this.tokenizer = tokenizer;
this.wordIndex = new ConcurrentHashMap<String, Integer>();
this.termDocumentFrequencys = new ConcurrentHashMap<Integer, AtomicInteger>();
this.weighting = weighting;
this.allWords = new ArrayList<String>();
noMoreAdding = false;
this.workSpace = new ThreadLocal<StringBuilder>();
this.storageSpace = new ThreadLocal<List<String>>();
this.wordCounts = new ThreadLocal<Map<String, Integer>>();
}
/**
* This method will load all the text documents that make up the original
* data set from their source. For each document,
* {@link #addOriginalDocument(java.lang.String) } should be called with the
* text of the document. <br>
* This method will be called when {@link #getDataSet() } is called for the
* first time. <br>
* New document vectors can be obtained after loading by calling
* {@link #newText(java.lang.String) }.
*/
public abstract void initialLoad();
/**
* To be called by the {@link #initialLoad() } method.
* It will take in the text and add a new document
* vector to the data set. Once all text documents
* have been loaded, this method should never be
* called again. <br>
* <br>
* This method is thread safe.
*
* @param text the text of the document to add
* @return the index of the created document for the given text. Starts from
* zero and counts up.
*/
protected int addOriginalDocument(String text)
{
if(noMoreAdding)
throw new RuntimeException("Initial data set has been finalized");
StringBuilder localWorkSpace = workSpace.get();
List<String> localStorageSpace = storageSpace.get();
Map<String, Integer> localWordCounts = wordCounts.get();
if(localWorkSpace == null)
{
localWorkSpace = new StringBuilder();
localStorageSpace = new ArrayList<String>();
localWordCounts = new LinkedHashMap<String, Integer>();
workSpace.set(localWorkSpace);
storageSpace.set(localStorageSpace);
wordCounts.set(localWordCounts);
}
localWorkSpace.setLength(0);
localStorageSpace.clear();
localWordCounts.clear();
tokenizer.tokenize(text, localWorkSpace, localStorageSpace);
for(String word : localStorageSpace)
{
Integer count = localWordCounts.get(word);
if(count == null)
localWordCounts.put(word, 1);
else
localWordCounts.put(word, count+1);
}
SparseVector vec = new SparseVector(currentLength.get()+1, localWordCounts.size());//+1 to avoid issues when its length is zero, will be corrected in finalization step anyway
for(Iterator<Map.Entry<String, Integer>> iter = localWordCounts.entrySet().iterator(); iter.hasNext();)
{
Map.Entry<String, Integer> entry = iter.next();
String word = entry.getKey();
int ms_to_sleep = 1;
            while(!addWord(word, vec, entry.getValue()))//try in a loop, exponential back-off
{
try
{
Thread.sleep(ms_to_sleep);
ms_to_sleep = Math.min(100, ms_to_sleep*2);
}
catch (InterruptedException ex)
{
Logger.getLogger(TextDataLoader.class.getName()).log(Level.SEVERE, null, ex);
}
}
}
localWordCounts.clear();
synchronized(vectors)
{
vectors.add(vec);
return documents++;
}
}
/**
* Does the work to add a given word to the sparse vector. May not succeed
* in race conditions when two ore more threads are trying to add a word at
* the same time.
*
* @param word the word to add to the vector
* @param vec the location to store the word occurrence
     * @param value the number of times the word occurred
     * @return {@code true} if the word was successfully added. {@code false} if
     * the word wasn't added due to a race, and should be tried again
*/
private boolean addWord(String word, SparseVector vec, Integer value)
{
Integer indx = wordIndex.get(word);
if(indx == null)//this word has never been seen before!
{
Integer index_for_new_word;
if((index_for_new_word = wordIndex.putIfAbsent(word, -1)) == null)//I won the race to insert this word into the map
{
/*
                 * we need to do this increment afterwards to avoid a race
                 * condition where two threads increment currentLength for the
* same word, as that will throw off other word additions
* before we can fix the problem
*/
index_for_new_word = currentLength.getAndIncrement();
wordIndex.put(word, index_for_new_word);//overwrite with correct value
}
if(index_for_new_word < 0)
return false;
//possible race on tdf as well when two threads found same new word at the same time
AtomicInteger termCount = new AtomicInteger(0), tmp = null;
tmp = termDocumentFrequencys.putIfAbsent(index_for_new_word, termCount);
if(tmp != null)
termCount = tmp;
termCount.incrementAndGet();
int newLen = Math.max(index_for_new_word+1, vec.length());
vec.setLength(newLen);
vec.set(index_for_new_word, value);
}
else//this word has been seen before
{
if(indx < 0)
return false;
AtomicInteger toInc = termDocumentFrequencys.get(indx);
if (toInc == null)
{
                //wordIndex and termDocumentFrequencys are not updated
                //atomically together, so we could get the index but not have its tDF entry ready
toInc = termDocumentFrequencys.putIfAbsent(indx, new AtomicInteger(1));
if (toInc == null)//other person finished adding before we "fixed" via putIfAbsent
toInc = termDocumentFrequencys.get(indx);
}
toInc.incrementAndGet();
            if (vec.length() <= indx)//happens when another thread sees the word first and adds it; we then check and find it, but have not yet increased our vector length
vec.setLength(indx+1);
vec.set(indx, value);
}
return true;
}
/**
* Once all original documents have been added, this method is called so
* that post processing steps can be applied.
*/
protected void finishAdding()
{
noMoreAdding = true;
workSpace = null;
storageSpace = null;
wordCounts = null;
int finalLength = currentLength.get();
int[] frqs = new int[finalLength];
for(Map.Entry<Integer, AtomicInteger> entry : termDocumentFrequencys.entrySet())
frqs[entry.getKey()] = entry.getValue().get();
for(SparseVector vec : vectors)
{
//Make sure all the vectors have the same length
vec.setLength(finalLength);
}
weighting.setWeight(vectors, IntList.view(frqs, finalLength));
for(SparseVector vec : vectors)
{
//Unlike normal index functions, WordWeighting needs to use the vector to do some set up first
weighting.applyTo(vec);
}
}
/**
* Returns a new data set containing the original data points that were
* loaded with this loader.
*
* @return an appropriate data set for this loader
*/
public DataSet getDataSet()
{
if(!noMoreAdding)
{
initialLoad();
finishAdding();
}
List<DataPoint> dataPoints= new ArrayList<DataPoint>(vectors.size());
for(SparseVector vec : vectors)
dataPoints.add(new DataPoint(vec, new int[0], new CategoricalData[0]));
return new SimpleDataSet(dataPoints);
}
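    /*
     * Minimal usage sketch (not part of the original source). MyLoader is a
     * hypothetical concrete subclass that implements initialLoad() and feeds
     * each document to addOriginalDocument:
     *
     * TextDataLoader loader = new MyLoader(...);
     * DataSet data = loader.getDataSet(); // runs initialLoad + finishAdding once
     * Vec query = loader.newText("new unseen document text");
     */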
/**
* To be called after all original texts have been loaded.
*
* @param text the text of the document to create a document vector from
     * @return the sparse vector representing this document
*/
@Override
public Vec newText(String text)
{
if(!noMoreAdding)
throw new RuntimeException("Initial documents have not yet loaded");
return getTextVectorCreator().newText(text);
}
@Override
public Vec newText(String input, StringBuilder workSpace, List<String> storageSpace)
{
if(!noMoreAdding)
throw new RuntimeException("Initial documents have not yet loaded");
return getTextVectorCreator().newText(input, workSpace, storageSpace);
}
/**
* Returns the {@link TextVectorCreator} used by this data loader to convert
* documents into vectors.
*
* @return the text vector creator used by this class
*/
public TextVectorCreator getTextVectorCreator()
{
if(!noMoreAdding)
throw new RuntimeException("Initial documents have not yet loaded");
else if(tvc == null)
tvc = new BasicTextVectorCreator(tokenizer, wordIndex, weighting);
return tvc;
}
/**
* Returns the original token for the given index in the data set
* @param index the numeric feature index
* @return the word token associated with the index
*/
public String getWordForIndex(int index)
{
//lazy population of allWords array
if(allWords.size() != wordIndex.size())//we added since this was done
{
while(allWords.size() < wordIndex.size())
allWords.add("");
for(Map.Entry<String, Integer> entry : wordIndex.entrySet())
allWords.set(entry.getValue(), entry.getKey());
}
if(index >= 0 && index < allWords.size())
return allWords.get(index);
else
return null;
}
/**
     * Returns the number of documents in which a token has been seen
     * @param index the numeric feature index
     * @return the document frequency (the number of documents containing the feature)
*/
public int getTermFrequency(int index)
{
return termDocumentFrequencys.get(index).get();
}
/**
* Creates a new transform factory to remove all features for tokens that
     * did not occur in a minimum number of documents
     * @param minCount the minimum number of documents a token must appear in to be kept as a feature
* @return a transform factory for removing features that did not occur
* often enough
*/
@SuppressWarnings("unchecked")
public RemoveAttributeTransform getMinimumOccurrenceDTF(int minCount)
{
final Set<Integer> numericToRemove = new IntSet();
for(int i = 0; i < termDocumentFrequencys.size(); i++)
if(termDocumentFrequencys.get(i).get() < minCount)
numericToRemove.add(i);
return new RemoveAttributeTransform(Collections.EMPTY_SET, numericToRemove);
}
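    /*
     * Example (sketch): prune rare tokens after loading, assuming the data
     * set's applyTransform facilities are used to apply the result:
     *
     * RemoveAttributeTransform rat = loader.getMinimumOccurrenceDTF(5);
     * dataSet.applyTransform(rat); // drops tokens seen in fewer than 5 documents
     */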
}
| 15,365 | 36.478049 | 182 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/TextVectorCreator.java | package jsat.text;
import java.io.Serializable;
import java.util.List;
import jsat.linear.Vec;
/**
* A Text Vector Creator is an object that can convert a text string into a
* {@link Vec}
*
* @author Edward Raff
*/
public interface TextVectorCreator extends Serializable
{
/**
* Converts the given input text into a vector representation.
* @param input the input string
* @return a vector representation
*/
public Vec newText(String input);
/**
* Converts the given input text into a vector representation
* @param input the input string
* @param workSpace an already allocated (but empty) string builder than can
* be used as a temporary work space.
* @param storageSpace an already allocated (but empty) list to place the
* tokens into
* @return a vector representation
*/
public Vec newText(String input, StringBuilder workSpace, List<String> storageSpace);
}
| 957 | 28.030303 | 89 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/stemming/LovinsStemmer.java |
package jsat.text.stemming;
import java.util.HashMap;
/**
* Implements Lovins' stemming algorithm described here
* http://snowball.tartarus.org/algorithms/lovins/stemmer.html
*
* @author Edward Raff
*/
public class LovinsStemmer extends Stemmer
{
private static final long serialVersionUID = -3229865664217642197L;
//There are 11 ending hash maps, each postfixed with the number of characters
private static final HashMap<String, String> ending11 = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = 4064350307133685150L;
{
put("alistically", "B"); put("arizability", "A"); put("izationally", "B");
}};
private static final HashMap<String, String> ending10 = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = -5247798032923242997L;
{
put("antialness", "A"); put("arisations", "A"); put("arizations", "A");
put("entialness", "A");
}};
private static final HashMap<String, String> ending9 = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = -9153017770847287495L;
{
put("allically", "C"); put("antaneous", "A"); put("antiality", "A");
put("arisation", "A"); put("arization", "A"); put("ationally", "B");
put("ativeness", "A"); put("eableness", "E"); put("entations", "A");
put("entiality", "A"); put("entialize", "A"); put("entiation", "A");
put("ionalness", "A"); put("istically", "A"); put("itousness", "A");
put("izability", "A"); put("izational", "A");
}};
private static final HashMap<String, String> ending8 = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = 3671522347706544570L;
{
put("ableness", "A"); put("arizable", "A"); put("entation", "A");
put("entially", "A"); put("eousness", "A"); put("ibleness", "A");
put("icalness", "A"); put("ionalism", "A"); put("ionality", "A");
put("ionalize", "A"); put("iousness", "A"); put("izations", "A");
put("lessness", "A");
}};
private static final HashMap<String, String> ending7 = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = -4697823317524161452L;
{
put("ability", "A"); put("aically", "A"); put("alistic", "B");
put("alities", "A"); put("ariness", "E"); put("aristic", "A");
put("arizing", "A"); put("ateness", "A"); put("atingly", "A");
put("ational", "B"); put("atively", "A"); put("ativism", "A");
put("elihood", "E"); put("encible", "A"); put("entally", "A");
put("entials", "A"); put("entiate", "A"); put("entness", "A");
put("fulness", "A"); put("ibility", "A"); put("icalism", "A");
put("icalist", "A"); put("icality", "A"); put("icalize", "A");
put("ication", "G"); put("icianry", "A"); put("ination", "A");
put("ingness", "A"); put("ionally", "A"); put("isation", "A");
put("ishness", "A"); put("istical", "A"); put("iteness", "A");
put("iveness", "A"); put("ivistic", "A"); put("ivities", "A");
put("ization", "F"); put("izement", "A"); put("oidally", "A");
put("ousness", "A");
}};
private static final HashMap<String, String> ending6 = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = -7030401064572348271L;
{
put("aceous", "A"); put("acious", "B"); put("action", "G");
put("alness", "A"); put("ancial", "A"); put("ancies", "A");
put("ancing", "B"); put("ariser", "A"); put("arized", "A");
put("arizer", "A"); put("atable", "A"); put("ations", "B");
put("atives", "A"); put("eature", "Z"); put("efully", "A");
put("encies", "A"); put("encing", "A"); put("ential", "A");
put("enting", "C"); put("entist", "A"); put("eously", "A");
put("ialist", "A"); put("iality", "A"); put("ialize", "A");
put("ically", "A"); put("icance", "A"); put("icians", "A");
put("icists", "A"); put("ifully", "A"); put("ionals", "A");
put("ionate", "D"); put("ioning", "A"); put("ionist", "A");
put("iously", "A"); put("istics", "A"); put("izable", "E");
put("lessly", "A"); put("nesses", "A"); put("oidism", "A");
}};
private static final HashMap<String, String> ending5 = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = -5282435864116373834L;
{
put("acies", "A"); put("acity", "A"); put("aging", "B");
put("aical", "A"); put("alism", "B"); put("ality", "A");
put("alize", "A"); put("allic", "b"); put("anced", "B");
put("ances", "B"); put("antic", "C"); put("arial", "A");
put("aries", "A"); put("arily", "A"); put("arity", "B");
put("arize", "A"); put("aroid", "A"); put("ately", "A");
put("ating", "I"); put("ation", "B"); put("ative", "A");
put("ators", "A"); put("atory", "A"); put("ature", "E");
put("early", "Y"); put("ehood", "A"); put("eless", "A");
put("ement", "A"); put("enced", "A"); put("ences", "A");
put("eness", "E"); put("ening", "E"); put("ental", "A");
put("ented", "C"); put("ently", "A"); put("fully", "A");
put("ially", "A"); put("icant", "A"); put("ician", "A");
put("icide", "A"); put("icism", "A"); put("icist", "A");
put("icity", "A"); put("idine", "I"); put("iedly", "A");
put("ihood", "A"); put("inate", "A"); put("iness", "A");
put("ingly", "B"); put("inism", "J"); put("inity", "c");
put("ional", "A"); put("ioned", "A"); put("ished", "A");
put("istic", "A"); put("ities", "A"); put("itous", "A");
put("ively", "A"); put("ivity", "A"); put("izers", "F");
put("izing", "F"); put("oidal", "A"); put("oides", "A");
put("otide", "A"); put("ously", "A");
}};
private static final HashMap<String, String> ending4 = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = -7293777277850278026L;
{
put("able", "A"); put("ably", "A"); put("ages", "B");
put("ally", "B"); put("ance", "B"); put("ancy", "B");
put("ants", "B"); put("aric", "A"); put("arly", "K");
put("ated", "I"); put("ates", "A"); put("atic", "B");
put("ator", "A"); put("ealy", "Y"); put("edly", "E");
put("eful", "A"); put("eity", "A"); put("ence", "A");
put("ency", "A"); put("ened", "E"); put("enly", "E");
put("eous", "A"); put("hood", "A"); put("ials", "A");
put("ians", "A"); put("ible", "A"); put("ibly", "A");
put("ical", "A"); put("ides", "L"); put("iers", "A");
put("iful", "A"); put("ines", "M"); put("ings", "N");
put("ions", "B"); put("ious", "A"); put("isms", "B");
put("ists", "A"); put("itic", "H"); put("ized", "F");
put("izer", "F"); put("less", "A"); put("lily", "A");
put("ness", "A"); put("ogen", "A"); put("ward", "A");
put("wise", "A"); put("ying", "B"); put("yish", "A");
}};
private static final HashMap<String, String> ending3 = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = -5629841014950478203L;
{
put("acy", "A"); put("age", "B"); put("aic", "A");
put("als", "b"); put("ant", "B"); put("ars", "O");
put("ary", "F"); put("ata", "A"); put("ate", "A");
put("eal", "Y"); put("ear", "Y"); put("ely", "E");
put("ene", "E"); put("ent", "C"); put("ery", "E");
put("ese", "A"); put("ful", "A"); put("ial", "A");
put("ian", "A"); put("ics", "A"); put("ide", "L");
put("ied", "A"); put("ier", "A"); put("ies", "P");
put("ily", "A"); put("ine", "M"); put("ing", "N");
put("ion", "Q"); put("ish", "C"); put("ism", "B");
put("ist", "A"); put("ite", "a"); put("ity", "A");
put("ium", "A"); put("ive", "A"); put("ize", "F");
put("oid", "A"); put("one", "R"); put("ous", "A");
}};
private static final HashMap<String, String> ending2 = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = -8894812965945848256L;
{
put("ae", "A"); put("al", "b"); put("ar", "X");
put("as", "B"); put("ed", "E"); put("en", "F");
put("es", "E"); put("ia", "A"); put("ic", "A");
put("is", "A"); put("ly", "B"); put("on", "S");
put("or", "T"); put("um", "U"); put("us", "V");
put("yl", "R"); put("s\'", "A"); put("\'s", "A");
}};
private static final HashMap<String, String> ending1 = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = -7536643426902207427L;
{
put("a", "A"); put("e", "A"); put("i", "A");
put("o", "A"); put("s", "W"); put("y", "B");
}};
private static final HashMap<String, String> endings = new HashMap<String, String>()
{/**
*
*/
private static final long serialVersionUID = -8057392854617089310L;
{
putAll(ending11); putAll(ending10); putAll(ending9); putAll(ending8); putAll(ending7); putAll(ending6);
putAll(ending5); putAll(ending4); putAll(ending3); putAll(ending2); putAll(ending1);
}};
private static String removeEnding(String word)
{
//The stem must contain at least 2 characters, so word-2 is the min
for(int i = Math.min(11, word.length()-2); i > 0; i--)
{
String ending = word.substring(word.length()-i);
String condition = endings.get(ending);
if(condition == null)
continue;
String stem = word.substring(0, word.length()-i);
switch(condition.charAt(0))
{
case 'A'://No restrictions on stem
return stem;
case 'B': //Minimum stem length = 3
if(stem.length() >= 3)
return stem;
break;
case 'C': //Minimum stem length = 4
if(stem.length() >= 4)
return stem;
break;
case 'D'://Minimum stem length = 5
if(stem.length() >= 5)
return stem;
break;
case 'E'://Do not remove ending after e
if(stem.endsWith("e"))
break;
return stem;
case 'F'://Minimum stem length = 3 and do not remove ending after e
if(stem.endsWith("e") || stem.length() < 3)
break;
return stem;
case 'G'://Minimum stem length = 3 and remove ending only after f
if(stem.endsWith("f") && stem.length() >= 3)
return stem;
break;
case 'H'://Remove ending only after t or ll
if(stem.endsWith("t") || stem.endsWith("ll"))
return stem;
break;
case 'I'://Do not remove ending after o or e
if(stem.endsWith("o") || stem.endsWith("e"))
break;
return stem;
case 'J': //Do not remove ending after a or e
if(stem.endsWith("a") || stem.endsWith("e"))
break;
return stem;
case 'K'://Minimum stem length = 3 and remove ending only after l, i or u*e
if(stem.length() >= 3 && stem.matches(".*(i|u.e|l)$"))
return stem;
break;
case 'L'://Do not remove ending after u, x or s, unless s follows o
if(stem.endsWith("os"))
return stem;
else if(stem.matches(".*(u|x|s)$"))
break;
return stem;
case 'M'://Do not remove ending after a, c, e or m
if(stem.endsWith("a") || stem.endsWith("c") || stem.endsWith("e") || stem.endsWith("m"))
break;
else
return stem;
case 'N'://Minimum stem length = 4 after s**, elsewhere = 3
if (stem.matches(".*s..$"))
if (stem.length() >= 4)
return stem;
else
break;
else if (stem.length() >= 3)
return stem;
break;
case 'O'://Remove ending only after l or i
if(stem.endsWith("l") || stem.endsWith("i"))
return stem;
break;
case 'P'://Do not remove ending after c
if(stem.endsWith("e"))
break;
return stem;
case 'Q'://Minimum stem length = 3 and do not remove ending after l or n
if(stem.length() < 3 || stem.endsWith("l") || stem.endsWith("n"))
break;
return stem;
case 'R'://Remove ending only after n or r
if(stem.endsWith("n") || stem.endsWith("r"))
return stem;
break;
case 'S'://Remove ending only after dr or t, unless t follows t
if(stem.endsWith("dr") || (stem.endsWith("t") && !stem.endsWith("tt")))
return stem;
break;
case 'T'://Remove ending only after s or t, unless t follows o
if(stem.endsWith("s") || (stem.endsWith("t") && !stem.endsWith("ot")))
return stem;
break;
case 'U'://Remove ending only after l, m, n or r
if(stem.endsWith("l") || stem.endsWith("m") || stem.endsWith("n") || stem.endsWith("r"))
return stem;
break;
case 'V'://Remove ending only after c
if(stem.endsWith("c"))
return stem;
break;
case 'W'://Do not remove ending after s or u
if(stem.endsWith("s") || stem.endsWith("u"))
break;
return stem;
case 'X'://Remove ending only after l, i or u*e
if(stem.matches(".*(l|i|u.e)$"))
return stem;
break;
case 'Y'://Remove ending only after in
if(stem.endsWith("in"))
return stem;
break;
case 'Z'://Do not remove ending after f
if(stem.endsWith("f"))
break;
return stem;
case 'a'://AA: Remove ending only after d, f, ph, th, l, er, or, es or t
if(stem.matches(".*(d|f|ph|th|l|er|or|es|t)$"))
return stem;
break;
case 'b'://BB: Minimum stem length = 3 and do not remove ending after met or ryst
if(stem.length() < 3 || stem.endsWith("met") || stem.endsWith("ryst"))
break;
return stem;
case 'c'://CC: Remove ending only after l
if(stem.endsWith("l"))
return stem;
break;
}
}
return word;
}
private static String fixStem(String stem)
{
//Rule 1 remove one of double b, d, g, l, m, n, p, r, s, t
char lastChar = stem.charAt(stem.length()-1);
stem = stem.replaceFirst("(dd|bb|gg|ll|mm|nn|pp|rr|ss|tt)$", "" + lastChar);
//Rule 2
stem = stem.replaceFirst("iev$", "ief");
//Rule 3
stem = stem.replaceFirst("uct$", "uc");
//Rule 4
stem = stem.replaceFirst("umpt$", "um");
//Rule 5
stem = stem.replaceFirst("rpt$", "rb");
//Rule 6
stem = stem.replaceFirst("urs$", "ur");
//Rule 7
stem = stem.replaceFirst("istr$", "ister");
//Rule 7a
stem = stem.replaceFirst("metr$", "meter");
//Rule 8
stem = stem.replaceFirst("olv$", "olut");
//Rule 9
if(stem.endsWith("ul") && !stem.endsWith("aoiul"))
stem = stem.replaceFirst("[^aoi]ul$", "l");
//Rule 10
stem = stem.replaceFirst("bex$", "bic");
//Rule 11
stem = stem.replaceFirst("dex$", "dic");
//Rule 12
stem = stem.replaceFirst("pex$", "pic");
//Rule 13
stem = stem.replaceFirst("tex$", "tic");
//Rule 14
stem = stem.replaceFirst("ax$", "ac");
//Rule 15
stem = stem.replaceFirst("ex$", "ec");
//Rule 16
stem = stem.replaceFirst("ix$", "ic");
//Rule 17
stem = stem.replaceFirst("lux$", "luc");
//Rule 18
stem = stem.replaceFirst("uad$", "uas");
//Rule 19
stem = stem.replaceFirst("vad$", "vas");
//Rule 20
stem = stem.replaceFirst("cid$", "cis");
//Rule 21
stem = stem.replaceFirst("lid$", "lis");
//Rule 22
stem = stem.replaceFirst("erid$", "eris");
//Rule 23
stem = stem.replaceFirst("pand$", "pans");
//Rule 24
if(stem.endsWith("end") && !stem.endsWith("send"))
stem = stem.replaceFirst("[^s]end$", "ens");
//Rule 25
stem = stem.replaceFirst("ond$", "ons");
//Rule 26
stem = stem.replaceFirst("lud$", "lus");
//Rule 27
stem = stem.replaceFirst("rud$", "rus");
//Rule 28
stem = stem.replaceFirst("[^pt]her$", "hes");
//Rule 29
stem = stem.replaceFirst("mit$", "mis");
//Rule 30
if(stem.endsWith("ent") && !stem.endsWith("ment"))
stem = stem.replaceFirst("[^m]ent$", "ens");
//Rule 31
stem = stem.replaceFirst("ert$", "ers");
//Rule 32
if(stem.endsWith("et") && !stem.endsWith("net"))
stem = stem.replaceFirst("et$", "es");
//Rule 33
stem = stem.replaceFirst("yt$", "ys");
//Rule 34
stem = stem.replaceFirst("yz$", "ys");
return stem;
}
public String stem(String word)
{
return fixStem(removeEnding(word));
}
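    /*
     * Usage sketch: stemming is a single call, e.g.
     *
     * Stemmer s = new LovinsStemmer();
     * s.stem("sitting"); // "ing" is removed under condition N, then the
     *                    // double-letter recoding rule yields "sit"
     */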
}
| 18,798 | 39.867391 | 111 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/stemming/PaiceHuskStemmer.java | package jsat.text.stemming;
/**
* Provides an implementation of the Paice Husk stemmer as described in: <br>
 * Paice, C. D. (1990). <i>Another Stemmer</i>. ACM SIGIR Forum, 24(3), 56–61.
* @author Edward Raff
*/
public class PaiceHuskStemmer extends Stemmer
{
private static final long serialVersionUID = -5949389288166850651L;
static private class Rule
{
/**
* The ending to try and match
*/
public final String ending;
/**
* How many characters from the end of the string to remove
*/
public final int toRemove;
/**
* The string to append the ending with
*/
public final String newEnding;
/**
* Indicates that this rule may only be applied if the word has not been
* modified before
*/
public final boolean virgin;
/**
* Indicates that the stemming process should exit if this rule is applied
*/
public final boolean terminal;
public Rule(String ending, int toRemove, String newEnding, boolean virgin, boolean terminal)
{
this.ending = ending;
this.toRemove = toRemove;
this.newEnding = newEnding;
this.virgin = virgin;
this.terminal = terminal;
}
/**
* If valid, returns the modified input string based on this rule. If no
* modification was done, the exact same string that was passed in is
* returned. An object comparison can then be done to check if the
* string was modified. <br>
* Appropriate handling of virgin and terminal flags is up to the user
* @param input the unstemmed input
* @return the stemmed output
*/
public String apply(String input)
{
if(input.endsWith(ending))
{
if(isVowel(input.charAt(0)))
{
                    //Starts with a vowel, stemmed result must be at least 2 chars long
if(input.length()-toRemove+newEnding.length() < 2)
return input;
}
                else//Starts with a consonant, 3 letters must remain
{
if(input.length()-toRemove+newEnding.length() < 3)
                    return input;//Not long enough
//Result must also contain at least one vowel
boolean noVowels = true;
for(int i = 0; i < input.length()-toRemove && noVowels; i++)
if(isVowel(input.charAt(i)) || input.charAt(i)== 'y')
noVowels = false;
for(int i = 0; i < newEnding.length() && noVowels; i++)
if(isVowel(newEnding.charAt(i)) || newEnding.charAt(i)== 'y')
noVowels = false;
if(noVowels)
                    return input;//No vowels left, stemming is not valid to apply
}
//We made it, we can apply the stem and return a new string
            if(toRemove == 0)//Protected word, return a new string explicitly to be super sure
return new String(input);
return input.substring(0, input.length()-toRemove) + newEnding;
}
return input;
}
}
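    /*
     * Note on the trailing comments below (e.g. "gni3>"): they are the rules
     * in Paice's original compact notation - the ending spelled in reverse,
     * an optional '*' meaning the rule only applies to an unmodified (virgin)
     * word, the number of characters to remove, the string to append, and
     * '>' to continue stemming or '.' to terminate.
     */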
/*
     * Ordered alphabetically by ending, meaning the rules should be attempted in
* the order they are presented in the array
*/
private static final Rule[] ARules = new Rule[]
{
new Rule("ia", 2, "", true, true), //ai*2.
new Rule("a", 1, "", true, true), //a*1.
};
private static final Rule[] BRules = new Rule[]
{
new Rule("bb", 1, "", false, true), //bb1.
};
private static final Rule[] CRules = new Rule[]
{
new Rule("ytic", 3, "s", false, true),//city3s.
new Rule("ic", 2, "", false, false), //ci2>
new Rule("nc", 1, "t", false, false), //cn1t>
};
private static final Rule[] DRules = new Rule[]
{
new Rule("dd", 1, "", false, true), //dd1.
new Rule("ied", 3, "y", false, false),//dei3y>
new Rule("ceed", 2, "ss", false, true), //deec2ss.
new Rule("eed", 1, "", false, true), //dee1.
new Rule("ed", 2, "", false, false), //de2>
new Rule("hood", 4, "", false, false), //dooh4>
};
private static final Rule[] ERules = new Rule[]
{
new Rule("e", 1, "", false, false), //e1>
};
private static final Rule[] FRules = new Rule[]
{
new Rule("lief", 1, "v", false, true), //feil1v.
new Rule("if", 2, "", false, true), //fi2>
};
private static final Rule[] GRules = new Rule[]
{
new Rule("ing", 3, "", false, false), //gni3>
new Rule("iag", 3, "y", false, true), //gai3y.
new Rule("ag", 2, "", false, false), //ga2>
new Rule("gg", 1, "", false, true), //gg1.
};
private static final Rule[] HRules = new Rule[]
{
new Rule("th", 2, "", true, true), //ht*2.
new Rule("guish", 5, "ct", false, true), //hsiug5ct.
new Rule("ish", 3, "", false, false), //hsi3>
};
private static final Rule[] IRules = new Rule[]
{
new Rule("i", 1, "", true, true), //i*1.
new Rule("i", 1, "y", false, false), //i1y>
};
private static final Rule[] JRules = new Rule[]
{
new Rule("ij", 1, "d", false, true), //ji1d.
new Rule("fuj", 1, "S", false, true), //juf1s.
new Rule("uj", 1, "d", false, true), //ju1d.
new Rule("oj", 1, "d", false, true), //jo1d.
new Rule("hej", 1, "r", false, true), //jeh1r.
new Rule("verj", 1, "t", false, true), //jrev1t.
new Rule("misj", 2, "t", false, true), //jsim2t.
new Rule("nj", 1, "d", false, true), //jn1d.
new Rule("j", 1, "s", false, true), //j1s.
};
private static final Rule[] LRules = new Rule[]
{
new Rule("ifiabl", 6, "", false, true), //lbaifi6.
new Rule("iabl", 4, "y", false, true), //lbai4y.
new Rule("abl", 3, "", false, false), //lba3>
new Rule("ibl", 3, "", false, true), //lbi3.
new Rule("bil", 2, "l", false, false), //lib2l>
new Rule("cl", 1, "", false, true), //lc1.
new Rule("iful", 4, "y", false, true), //lufi4y.
new Rule("ful", 3, "", false, false), //luf3>
new Rule("uf", 2, "", false, true), //lu2.
new Rule("ial", 3, "", false, false), //lai3>
new Rule("ual", 3, "", false, false), //lau3>
new Rule("al", 2, "", false, false), //la2>
new Rule("ll", 1, "", false, true), //ll1.
};
private static final Rule[] MRules = new Rule[]
{
new Rule("ium", 3, "", false, true), //mui3.
new Rule("mu", 2, "", true, true), //mu*2.
new Rule("ism", 3, "", false, false), //msi3>
new Rule("mm", 1, "", false, true), //mm1.
};
private static final Rule[] NRules = new Rule[]
{
new Rule("sion", 4, "j", false, false), //nois4j>
new Rule("xion", 4, "ct", false, true), //noix4ct.
new Rule("ion", 3, "", false, false), //noi3>
new Rule("ian", 3, "", false, false), //nai3>
new Rule("an", 2, "", false, false), //na2>
new Rule("een", 0, "", false, true), //nee0.
new Rule("en", 2, "", false, false), //ne2>
new Rule("nn", 1, "", false, true), //nn1.
};
private static final Rule[] PRules = new Rule[]
{
new Rule("ship", 4, "", false, false), //pihs4>
new Rule("pp", 1, "", false, true), //pp1.
};
private static final Rule[] RRules = new Rule[]
{
new Rule("er", 2, "", false, false), //re2>
new Rule("ear", 0, "", false, true), //rea0.
new Rule("ar", 2, "", false, true), //ra2.
new Rule("or", 2, "", false, false), //ro2>
new Rule("ur", 2, "", false, false), //ru2>
new Rule("rr", 1, "", false, true), //rr1.
new Rule("tr", 1, "", false, false), //rt1>
new Rule("ier", 3, "y", false, false), //rei3y>
};
private static final Rule[] SRules = new Rule[]
{
new Rule("ies", 3, "y", false, false), //sei3y>
new Rule("sis", 2, "", false, true), //sis2.
new Rule("ness", 4, "", false, false), //ssen4>
new Rule("ss", 0, "", false, true), //ss0.
new Rule("ous", 3, "", false, false), //suo3>
new Rule("us", 2, "", true, true), //su*2.
new Rule("s", 1, "", true, false), //s*1>
new Rule("s", 0, "", false, true), //s0.
};
private static final Rule[] TRules = new Rule[]
{
new Rule("plicat", 4, "y", false, true), //tacilp4y.
new Rule("at", 2, "", false, false), //ta2>
new Rule("ment", 4, "", false, false), //tnem4>
new Rule("ent", 3, "", false, false), //tne3>
new Rule("ant", 3, "", false, false), //tna3>
new Rule("ript", 2, "b", false, true), //tpir2b.
new Rule("orpt", 2, "b", false, true), //tpro2b.
new Rule("duct", 1, "", false, true), //tcud1.
new Rule("sumpt", 2, "", false, true), //tpmus2.
new Rule("cept", 2, "iv", false, true), //tpec2iv.
new Rule("olut", 2, "v", false, true), //tulo2v.
new Rule("sist", 0, "", false, true), //tsis0.
new Rule("ist", 3, "", false, false), //tsi3>
new Rule("tt", 1, "", false, true), //tt1.
};
private static final Rule[] URules = new Rule[]
{
new Rule("iqu", 3, "", false, true), //uqi3.
new Rule("ogu", 1, "", false, true), //ugo1.
};
private static final Rule[] VRules = new Rule[]
{
new Rule("siv", 3, "j", false, false), //vis3j>
new Rule("iev", 0, "", false, true), //vie0.
new Rule("iv", 2, "", false, false), //vi2>
};
private static final Rule[] YRules = new Rule[]
{
new Rule("bly", 1, "", false, false), //ylb1>
new Rule("ily", 3, "y", false, false), //yli3y>
new Rule("ply", 0, "", false, true), //ylp0.
new Rule("ly", 2, "", false, false), //yl2>
new Rule("ogy", 1, "", false, true), //ygo1.
new Rule("phy", 1, "", false, true), //yhp1.
new Rule("omy", 1, "", false, true), //ymo1.
new Rule("opy", 1, "", false, true), //ypo1.
new Rule("ity", 3, "", false, false), //yti3>
new Rule("ety", 3, "", false, false), //yte3>
new Rule("lty", 2, "", false, true), //ytl2.
new Rule("istry", 5, "", false, true), //yrtsi5.
new Rule("ary", 3, "", false, false), //yra3>
new Rule("ory", 3, "", false, false), //yro3>
new Rule("ify", 3, "", false, true), //yfi3.
new Rule("ncy", 2, "t", false, false), //ycn2t>
new Rule("acy", 3, "", false, false), //yca3>
};
private static final Rule[] ZRules = new Rule[]
{
new Rule("iz", 2, "", false, false), //zi2>
new Rule("yz", 1, "s", false, true), //zy1s.
};
private static final Rule[] NoRules = new Rule[0];
/**
     * The rule tables, one per letter of the alphabet, indexed by the last letter of the word ('a' through 'z')
*/
private static final Rule[][] rules = new Rule[][]
{
ARules, BRules, CRules, DRules, ERules,
FRules, GRules, HRules, IRules, JRules,
NoRules, LRules, MRules, NRules, NoRules,
PRules, NoRules, RRules, SRules, TRules,
URules, VRules, NoRules, NoRules, YRules,
ZRules
};
private static boolean isVowel(char letter)
{
return letter == 'a' || letter == 'e' || letter == 'i' || letter == 'o' || letter == 'u';
}
@Override
public String stem(String word)
{
boolean virginRound = true;
boolean stop;
int charOffset = "a".charAt(0);
do
{
stop = true;
int ruleIndex = word.charAt(word.length()-1)-charOffset;
            if(ruleIndex < 0 || ruleIndex >= rules.length)
continue;
for(Rule rule : rules[ruleIndex])
{
if(rule.virgin && !virginRound)
continue;
String test = rule.apply(word);
if(test != word)//Rule was applied, is it acceptable?
{
word = test;
stop = false;
if(rule.terminal)
return word;
else
break;
}
}
virginRound = false;
}
while(!stop);
return word;
}
}
| 12,808 | 34.77933 | 100 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/stemming/PorterStemmer.java |
package jsat.text.stemming;
import java.util.LinkedHashMap;
import java.util.Map;
/**
* Implements Porter's stemming algorithm http://tartarus.org/~martin/PorterStemmer/def.txt . <br>
* Implemented for ease of understanding and legibility rather than performance.
* @author Edward Raff
*/
public class PorterStemmer extends Stemmer
{
private static final long serialVersionUID = -3809291457988435043L;
static final Map<String, String> step2_endings = new LinkedHashMap<String, String>();
static
{
step2_endings.put("ational", "ate");
step2_endings.put("tional", "tion");
step2_endings.put("enci", "ence");
step2_endings.put("anci", "ance");
step2_endings.put("izer", "ize");
step2_endings.put("abli", "able");
step2_endings.put("alli", "al");
step2_endings.put("entli", "ent");
step2_endings.put("eli", "e");
step2_endings.put("ousli", "ous");
step2_endings.put("ization", "ize");
step2_endings.put("ation", "ate");
step2_endings.put("ator", "ate");
step2_endings.put("alsim", "al");
step2_endings.put("iveness", "ive");
step2_endings.put("fulness", "ful");
step2_endings.put("ousness", "ous");
step2_endings.put("aliti", "al");
step2_endings.put("iviti", "ive");
step2_endings.put("biliti", "ble");
}
static final Map<String, String> step3_endings = new LinkedHashMap<String, String>();
static
{
step3_endings.put("icate", "ic");
step3_endings.put("ative", "");
step3_endings.put("alize", "al");
step3_endings.put("iciti", "ic");
step3_endings.put("ical", "ic");
step3_endings.put("ful", "");
step3_endings.put("ness", "");
}
static final Map<String, String> step4_endings = new LinkedHashMap<String, String>();
static
{
step4_endings.put("al", "");
step4_endings.put("ance", "");
step4_endings.put("ence", "");
step4_endings.put("er", "");
step4_endings.put("ic", "");
step4_endings.put("able", "");
step4_endings.put("ible", "");
step4_endings.put("ant", "");
step4_endings.put("ement", "");
step4_endings.put("ment", "");
step4_endings.put("ent", "");
step4_endings.put("ion", "");
step4_endings.put("ou", "");
step4_endings.put("ism", "");
step4_endings.put("ate", "");
step4_endings.put("iti", "");
step4_endings.put("ous", "");
step4_endings.put("ive", "");
step4_endings.put("ize", "");
}
@Override
public String stem(String s)
{
String tmp;
//Step 1a
if (s.endsWith("sses"))
s = s.replaceAll("sses$", "ss");
else if (s.endsWith("ies"))
s = s.replaceAll("ies$", "i");
else if(s.endsWith("ss"))
{
//Do nothing
}
else if(s.endsWith("s"))
s = s.substring(0, s.length()-1);
//Step 1b
boolean step1b_specialCase = false;//If the second or third of the rules in Step 1b is successful
if (s.endsWith("eed"))
{
tmp = s.replaceAll("eed$", "ee");
if(measure(tmp) > 0)
s = tmp;
}
else if (s.endsWith("ed"))
{
tmp = s.replaceAll("ed$", "");
if(containsVowel(tmp))
{
s = tmp;
step1b_specialCase = true;
}
}
else if (s.endsWith("ing"))
{
tmp = s.replaceAll("ing$", "");
if(containsVowel(tmp))
{
s = tmp;
step1b_specialCase = true;
}
}
if (step1b_specialCase)
{
if (s.endsWith("at"))
s = s.concat("e");
else if (s.endsWith("bl"))
s = s.concat("e");
else if (s.endsWith("iz"))
s = s.concat("e");
else if(doubleConstant(s, 'l', 's', 'z'))
s = s.substring(0, s.length()-1);//remove last letter
else if(oRule(s) && measure(s) == 1)
s = s.concat("e");
}
//Step 1c
if(s.endsWith("y") && containsVowel(s.substring(0, s.length()-1)))
s = s.substring(0, s.length()-1).concat("i");
//Step 2
for (Map.Entry<String, String> entry : step2_endings.entrySet())
if (s.endsWith(entry.getKey()))
{
tmp = s.replaceAll(entry.getKey() + "$", entry.getValue());
if (measure(tmp) > 0)
{
s = tmp;
break;
}
}
//Step 3
for (Map.Entry<String, String> entry : step3_endings.entrySet())
if (s.endsWith(entry.getKey()))
{
tmp = s.replaceAll(entry.getKey() + "$", entry.getValue());
if (measure(tmp) > 0)
{
s = tmp;
break;
}
}
//Step 4
for (Map.Entry<String, String> entry : step4_endings.entrySet())
if (s.endsWith(entry.getKey()))
{
if(s.endsWith("ion") && !(s.length() >= 4 && (s.charAt(s.length()-4) == 's' || s.charAt(s.length()-4) == 't')))
continue;//special case on ion, and they didn't match
tmp = s.replaceAll(entry.getKey() + "$", entry.getValue());
if (measure(tmp) > 1)
{
s = tmp;
break;
}
}
//Step 5a
if (s.endsWith("e"))
{
tmp = s.substring(0, s.length() - 1);
if(measure(tmp) > 1)
s = tmp;
else if(measure(tmp) == 1 && !oRule(tmp))
s = tmp;
}
//Step 5b
int lp = s.length()-1;
if(lp < 1)
return s;
if(s.charAt(lp) == s.charAt(lp-1) && s.charAt(lp) == 'l')
{
tmp = s.substring(0, s.length() - 1);
if(measure(tmp) > 1)
s = tmp;
}
return s;
}
private static int measure(String s)
{
return measure(s, 0, s.length());
}
private static int measure(String c, int start, int length)
{
//[C](VC){m}[V]
        //Measure == the value of m in the above expression
int pos = start;
int m = 0;
        //Move past first C, now we are detecting (VC){m}[V]
while(!isVowel(c, pos) && pos < (length - start))
pos++;
boolean vFollowedByC = false;
do
{
vFollowedByC = false;
while (isVowel(c, pos)&& pos < (length-start))
pos++;
while (!isVowel(c, pos) && pos < (length-start))
{
pos++;
vFollowedByC = true;
}
m++;
}
while (pos < (length - start) && vFollowedByC);
        if(vFollowedByC)//VC <- ended like that, it counts
            return m;
        else//V <- ended in V, doesn't count
return m-1;
}
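    /*
     * Worked examples of the measure m in [C](VC){m}[V], taken from Porter's
     * original definition:
     * m = 0: "tr", "ee", "tree", "by"
     * m = 1: "trouble", "oats", "trees", "ivy"
     * m = 2: "troubles", "private", "oaten"
     */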
private static boolean isVowel(String s, int pos)
{
/*
* A \consonant\ in a word is a letter other than A, E, I, O or U, and other
* than Y preceded by a consonant.
*/
if (pos >= s.length())
return false;
switch (s.charAt(pos))
{
case 'a':
case 'e':
case 'i':
case 'o':
case 'u':
return true;
case 'y':
if (pos == s.length() - 1)//end of the array
return true;
                return isVowel(s, pos + 1);//Y preceded by a consonant is a Vowel
default:
return false;
}
}
/**
* *o - the stem ends cvc, where the second c is not W, X or Y (e.g. -WIL, -HOP).
*/
private static boolean oRule(String s)
{
int pos = s.length()-1;
if(pos < 2)
return false;
if (!isVowel(s, pos) && isVowel(s, pos - 1) && !isVowel(s, pos - 2))
{
switch (s.charAt(pos))
{
case 'w':
case 'x':
case 'y':
return false;
default:
return true;
}
}
return false;
}
private static boolean containsVowel(String s)
{
for (int i = 0; i < s.length(); i++)
if (isVowel(s, i))
return true;
return false;
}
private static boolean doubleConstant(String s, char... except)
{
if (s.length() <= 1)
return false;
char c;
if ((c = s.charAt(s.length() - 1)) == s.charAt(s.length() - 2))
{
for (char e : except)
if (c == e)
return false;
return true;
}
return false;
}
}
| 9,228 | 28.298413 | 127 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/stemming/Stemmer.java |
package jsat.text.stemming;
import java.io.Serializable;
import java.util.List;
/**
* Stemmers are algorithms that attempt reduce strings to their common stem or
* root word. For example, a stemmer might idly reduce "runs" "running" and
* "ran" to the single stem word "run". This reduces the feature space size,
* and allows multiple words that have the same meaning to be counted together.
* <br>
* Do not expect perfect results from stemming. This class provides the
* contract for a stemmer that does not have any word history.
*
* @author Edward Raff
*/
public abstract class Stemmer implements Serializable
{
private static final long serialVersionUID = 1889842876393488149L;
/**
* Reduce the given input to its stem word
* @param word the unstemmed input word
* @return the stemmed version of the word
*/
abstract public String stem(String word);
/**
* Replaces each value in the list with the stemmed version of the word
* @param list the list to apply stemming to
*/
public void applyTo(List<String> list)
{
for(int i = 0; i < list.size(); i++)
list.set(i, stem(list.get(i)));
}
/**
* Replaces each value in the array with the stemmed version of the word
* @param arr the array to apply stemming to
*/
public void applyTo(String[] arr)
{
for(int i = 0; i < arr.length; i++)
arr[i] = stem(arr[i]);
}
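    /*
     * Usage sketch: all stemmers share this interface, e.g.
     *
     * Stemmer stemmer = new PorterStemmer();
     * String stem = stemmer.stem("running"); // reduces toward "run"
     * stemmer.applyTo(tokens); // stems a List<String> in place
     */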
}
| 1,469 | 28.4 | 80 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/stemming/VoidStemmer.java |
package jsat.text.stemming;
/**
* The most naive of stemming possible, this class simply returns whatever string is given to it.
* @author Edward Raff
*/
public class VoidStemmer extends Stemmer
{
private static final long serialVersionUID = -5059926028932641447L;
public String stem(String word)
{
return word;
}
}
| 348 | 16.45 | 98 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/tokenizer/NGramTokenizer.java | package jsat.text.tokenizer;
import java.util.ArrayList;
import java.util.List;
/**
* This tokenizer creates n-grams, which are sequences of tokens combined into
* their own larger token. For example, "the dog barked" could be a 3-gram. If
* all sub n-grams are also being generated, the returned set would contain the
* 1-grams "the", "dog", and "barked", the 2-grams "the dog" and "dog barked",
* and the aforementioned 3-gram. For this to work, this tokenizer assumes the
* base tokenizer returns tokens in the order they were seen. <br>
* Note that n-grams can significantly increase the number of unique tokens, and
* n-grams are inherently rarer than the 1-grams they are generated from.
*
* @author Edward Raff
*/
public class NGramTokenizer implements Tokenizer
{
private static final long serialVersionUID = 7551087420391197139L;
/**
* The number of n-grams to generate
*/
private int n;
/**
* The base tokenizer
*/
private Tokenizer base;
/**
* whether or not to generate all sub n-grams
*/
private boolean allSubN;
/**
* Creates a new n-gramer
* @param n the length of the ngrams. While it should be greater than 1, 1
* is still a valid input.
* @param base the base tokenizer to create n-grams from
* @param allSubN {@code true} to generate all sub n-grams, {@code false} to
* only return the n-grams specified
*/
public NGramTokenizer(int n, Tokenizer base, boolean allSubN)
{
if(n <= 0)
throw new IllegalArgumentException("Number of n-grams must be positive, not " + n);
this.n = n;
this.base = base;
this.allSubN = allSubN;
}
@Override
public List<String> tokenize(String input)
{
List<String> storageSpace = new ArrayList<String>();
tokenize(input, new StringBuilder(), storageSpace);
return storageSpace;
}
@Override
public void tokenize(String input, StringBuilder workSpace, List<String> storageSpace)
{
base.tokenize(input, workSpace, storageSpace);//the "1-grams"
int origSize = storageSpace.size();
if(n == 1)
return;//nothing more to do
for (int i = 1; i < origSize; i++)//slide from left to right on the 1-grams
{
//generate the n-grams from 2 to n
for (int gramSize = allSubN ? 2 : n; gramSize <= n; gramSize++)
{
workSpace.setLength(0);
int j = i - (gramSize - 1);
if(j < 0)//means we are going past what we have, and we would be adding duplicates
continue;
for(; j < i; j++)
{
if (workSpace.length() > 0)
workSpace.append(' ');
workSpace.append(storageSpace.get(j));
}
workSpace.append(' ').append(storageSpace.get(i));
storageSpace.add(workSpace.toString());
}
}
        if(!allSubN)//don't generate subs! get rid of those dirty 1-grams
storageSpace.subList(0, origSize).clear();
}
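    /*
     * Usage sketch, following the example in the class documentation:
     *
     * Tokenizer ng = new NGramTokenizer(3, new NaiveTokenizer(), true);
     * ng.tokenize("the dog barked");
     * // -> [the, dog, barked, the dog, dog barked, the dog barked]
     */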
}
| 3,194 | 32.989362 | 98 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/tokenizer/NaiveTokenizer.java |
package jsat.text.tokenizer;
import java.util.ArrayList;
import java.util.List;
/**
*
* A simple tokenizer. It converts everything to lower case, and splits on white
* space. Anything that is not a letter, digit, or space, is treated as white
* space. This behavior can be altered slightly, and allows for setting a
* minimum and maximum allowed length for tokens. This can be useful when
* dealing with noisy documents, and removing small words. <br>
*
* @author Edward Raff
*/
public class NaiveTokenizer implements Tokenizer
{
private static final long serialVersionUID = -2112091783442076933L;
private boolean useLowerCase;
private boolean otherToWhiteSpace = true;
private boolean noDigits = false;
private int minTokenLength = 1;
private int maxTokenLength = Integer.MAX_VALUE;
/**
* Creates a new naive tokenizer that converts words to lower case
*/
public NaiveTokenizer()
{
this(true);
}
/**
* Creates a new naive tokenizer
*
* @param useLowerCase {@code true} to convert everything to lower,
* {@code false} to leave the case as is
*/
public NaiveTokenizer(boolean useLowerCase)
{
this.useLowerCase = useLowerCase;
}
/**
     * Sets whether or not characters are converted to lower case
     * @param useLowerCase {@code true} to convert characters to lower case, {@code false} to leave them as-is
*/
public void setUseLowerCase(boolean useLowerCase)
{
this.useLowerCase = useLowerCase;
}
/**
* Returns {@code true} if letters are converted to lower case,
* {@code false} for case sensitive
     * @return {@code true} if letters are converted to lower case, {@code false} if the case is preserved
*/
public boolean isUseLowerCase()
{
return useLowerCase;
}
/**
* Sets whether or not all non letter and digit characters are treated as
* white space, or ignored completely. If ignored, the tokenizer parses the
* string as if all non letter, digit, and whitespace characters did not
* exist in the original string.<br>
* <br>
* Setting this to {@code false} can result in a lower feature count,
* especially for noisy documents.
* @param otherToWhiteSpace {@code true} to treat all "other" characters as
* white space, {@code false} to ignore them
*/
public void setOtherToWhiteSpace(boolean otherToWhiteSpace)
{
this.otherToWhiteSpace = otherToWhiteSpace;
}
/**
* Returns whether or not all other illegal characters are treated as
* whitespace, or ignored completely.
* @return {@code true} if all other characters are treated as whitespace
*/
public boolean isOtherToWhiteSpace()
{
return otherToWhiteSpace;
}
@Override
public List<String> tokenize(String input)
{
ArrayList<String> toRet = new ArrayList<String>();
StringBuilder sb = new StringBuilder(input.length()/10);
tokenize(input, sb, toRet);
return toRet;
}
@Override
public void tokenize(String input, StringBuilder workSpace, List<String> storageSpace)
{
for(int i = 0; i < input.length(); i++)
{
char c = input.charAt(i);
if(Character.isLetter(c))
if (useLowerCase)
workSpace.append(Character.toLowerCase(c));
else
workSpace.append(c);
else if (!noDigits && Character.isDigit(c))
workSpace.append(c);
else if(!otherToWhiteSpace && !Character.isWhitespace(c))
continue;
else //end of token
{
if(workSpace.length() >= minTokenLength && workSpace.length() <= maxTokenLength)
storageSpace.add(workSpace.toString());
workSpace.setLength(0);
}
}
if(workSpace.length() >= minTokenLength && workSpace.length() <= maxTokenLength)
storageSpace.add(workSpace.toString());
}
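    /*
     * Usage sketch: with the defaults plus a minimum token length of 3,
     * punctuation is treated as white space and short tokens are dropped:
     *
     * NaiveTokenizer tok = new NaiveTokenizer();
     * tok.setMinTokenLength(3);
     * tok.tokenize("The dog's 2 bones!"); // -> [the, dog, bones]
     */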
/**
* Sets the maximum allowed length for any token. Any token discovered
* exceeding the length will not be accepted and skipped over. The default
* is unbounded.
*
* @param maxTokenLength the maximum token length to accept as a valid token
*/
public void setMaxTokenLength(int maxTokenLength)
{
if(maxTokenLength < 1)
throw new IllegalArgumentException("Max token length must be positive, not " + maxTokenLength);
if(maxTokenLength <= minTokenLength)
throw new IllegalArgumentException("Max token length must be larger than the min token length");
this.maxTokenLength = maxTokenLength;
}
/**
* Returns the maximum allowed token length
* @return the maximum allowed token length
*/
public int getMaxTokenLength()
{
return maxTokenLength;
}
/**
* Sets the minimum allowed token length. Any token discovered shorter than
     * the minimum length will not be accepted and skipped over. The default
     * is 1.
* @param minTokenLength the minimum length for a token to be used
*/
public void setMinTokenLength(int minTokenLength)
{
if(minTokenLength < 0)
throw new IllegalArgumentException("Minimum token length must be non negative, not " + minTokenLength);
if(minTokenLength > maxTokenLength)
throw new IllegalArgumentException("Minimum token length can not exced the maximum token length");
this.minTokenLength = minTokenLength;
}
/**
* Returns the minimum allowed token length
     * @return the minimum allowed token length
*/
public int getMinTokenLength()
{
return minTokenLength;
}
/**
* Sets whether digits will be accepted in tokens or treated as "other" (not
* white space and not character). <br>
* The default it to allow digits.
*
* @param noDigits {@code true} to disallow numeric digits, {@code false} to
* allow digits.
*/
public void setNoDigits(boolean noDigits)
{
this.noDigits = noDigits;
}
/**
* Returns {@code true} if digits are not allowed in tokens, {@code false}
* otherwise.
* @return {@code true} if digits are not allowed in tokens, {@code false}
* otherwise.
*/
public boolean isNoDigits()
{
return noDigits;
}
}
| 6,460 | 30.671569 | 115 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/tokenizer/StemmingTokenizer.java |
package jsat.text.tokenizer;
import java.util.List;
import jsat.text.stemming.Stemmer;
/**
*
* @author Edward Raff
*/
public class StemmingTokenizer implements Tokenizer
{
private static final long serialVersionUID = 2883247633791522390L;
private Stemmer stemmer;
private Tokenizer baseTokenizer;
public StemmingTokenizer(Stemmer stemmer, Tokenizer baseTokenizer)
{
this.stemmer = stemmer;
this.baseTokenizer = baseTokenizer;
}
@Override
public List<String> tokenize(String input)
{
List<String> tokens = baseTokenizer.tokenize(input);
stemmer.applyTo(tokens);
return tokens;
}
@Override
public void tokenize(String input, StringBuilder workSpace, List<String> storageSpace)
{
baseTokenizer.tokenize(input, workSpace, storageSpace);
stemmer.applyTo(storageSpace);
}
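    /*
     * Usage sketch: wraps a base tokenizer so every returned token is stemmed:
     *
     * Tokenizer tok = new StemmingTokenizer(new PorterStemmer(), new NaiveTokenizer());
     * tok.tokenize("the dogs were running"); // tokens come back stemmed, e.g. "running" -> "run"
     */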
}
| 893 | 21.35 | 90 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/tokenizer/StopWordTokenizer.java | package jsat.text.tokenizer;
import java.util.*;
/**
* This tokenizer wraps another such that any stop words that would have been
* returned by the base tokenizer are removed. The stop list is case sensitive.
*
* @author Edward Raff
*/
public class StopWordTokenizer implements Tokenizer
{
private static final long serialVersionUID = 445704970760705567L;
private Tokenizer base;
private Set<String> stopWords;
/**
* Creates a new Stop Word tokenizer
* @param base the base tokenizer to use
* @param stopWords the collection of stop words to remove from
* tokenizations. A copy of the collection will be made
*/
public StopWordTokenizer(Tokenizer base, Collection<String> stopWords)
{
this.base = base;
this.stopWords = new HashSet<String>(stopWords);
}
/**
* Creates a new Stop Word tokenizer
* @param base the base tokenizer to use
* @param stopWords the array of strings to use as stop words
*/
public StopWordTokenizer(Tokenizer base, String... stopWords)
{
this(base, Arrays.asList(stopWords));
}
@Override
public List<String> tokenize(String input)
{
List<String> tokens = base.tokenize(input);
tokens.removeAll(stopWords);
return tokens;
}
@Override
public void tokenize(String input, StringBuilder workSpace, List<String> storageSpace)
{
base.tokenize(input, workSpace, storageSpace);
storageSpace.removeAll(stopWords);
}
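    /*
     * Usage sketch using the small built-in English stop list defined below:
     *
     * Tokenizer tok = new StopWordTokenizer(new NaiveTokenizer(),
     *         StopWordTokenizer.ENGLISH_STOP_SMALL_BASE);
     * tok.tokenize("the dog barked at them"); // -> [dog, barked]
     */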
/**
* This unmodifiable set contains a very small and simple stop word list for
* English based on the 100 most common English words and includes all
* characters. All tokens the set are lowercase. <br>
* This stop list is not meant to be authoritative or complete, but only a
* reasonable starting point that shouldn't degrade any common tasks. <br>
* <br>
* Significant gains can be realized by deriving a stop list better suited
* to your individual needs.
*
*/
public static final Set<String> ENGLISH_STOP_SMALL_BASE = Collections.unmodifiableSet(new HashSet<String>(Arrays.asList(
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
"the", "of", "to", "and", "in", "is", "it", "you", "that",
"was", "for", "are", "on", "as", "have", "with", "they", "be", "at",
"this", "from", "or", "had", "by", "but", "some", "what", "there",
"we", "can", "out", "other", "were", "all", "your", "when", "use",
"word", "said", "an", "each", "which", "do", "their", "if", "will",
"way", "about", "many", "them", "would", "thing", "than", "down",
"too")));
}
| 2,842 | 35.448718 | 124 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/tokenizer/Tokenizer.java |
package jsat.text.tokenizer;
import java.io.Serializable;
import java.util.List;
/**
* Interface for taking the text of a document and breaking it up into features.
* For example "This doc" might become "this" and "doc"
*
* @author Edward Raff
*/
public interface Tokenizer extends Serializable
{
/**
* Breaks the input string into a series of tokens that may be used as
* features for a classifier. The returned tokens must be either new string
* objects or interned strings. If a token is returned that is backed by
* the original document, memory may get leaked by processes consuming the
* token. <br>
* This method should be thread safe
*
* @param input the string to tokenize
     * @return a list containing the tokens extracted from the input
*/
public List<String> tokenize(String input);
/**
* Breaks the input string into a series of tokens that may be used as
* features for a classifier. The returned tokens must be either new string
* objects or interned strings. If a token is returned that is backed by
* the original document, memory may get leaked by processes consuming the
* token. <br>
* This method should be thread safe
*
* @param input the string to tokenize
* @param workSpace an already allocated (but empty) string builder than can
* be used as a temporary work space.
* @param storageSpace an already allocated (but empty) list to place the
* tokens into
*/
public void tokenize(String input, StringBuilder workSpace, List<String> storageSpace);
}
| 1,621 | 35.863636 | 91 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/topicmodel/OnlineLDAsvi.java | package jsat.text.topicmodel;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;
import jsat.DataSet;
import jsat.exceptions.FailedToFitException;
import jsat.linear.DenseVector;
import jsat.linear.IndexValue;
import jsat.linear.ScaledVector;
import jsat.linear.SparseVector;
import jsat.linear.Vec;
import jsat.math.FastMath;
import jsat.parameters.Parameter;
import jsat.parameters.Parameterized;
import jsat.utils.DoubleList;
import jsat.utils.FakeExecutor;
import jsat.utils.IntList;
import jsat.utils.ListUtils;
import jsat.utils.SystemInfo;
import jsat.utils.concurrent.ParallelUtils;
import jsat.utils.random.RandomUtil;
/**
* This class provides an implementation of <i>Latent Dirichlet Allocation</i>
* for learning a topic model from a set of documents. This implementation is
* based on Stochastic Variational Inference and is meant for large collections
* (more than 100,000 data points) and can learn in an online fashion. <br>
* <br>
* For LDA it is common to set {@link #setAlpha(double) α} =
* {@link #setEta(double) η} = 1/K, where K is the number of topics to be
 * learned. Note that η is not a learning rate parameter here, even though that
 * symbol is often used for one elsewhere. <br>
*
* For this algorithm, some potential parameter combinations (by column) for
* {@link #setMiniBatchSize(int) batch size}, {@link #setKappa(double) κ},
* and {@link #setTau0(double) τ<sub>0</sub>} are:<br>
* <table>
* <caption></caption>
* <tr>
* <td>batch size</td>
* <td>256</td>
* <td>1024</td>
* <td>4096</td>
* </tr>
* <tr>
* <td>κ</td>
* <td>0.6</td>
* <td>0.5</td>
* <td>0.5</td>
* </tr>
* <tr>
* <td>τ<sub>0</sub></td>
* <td>1024</td>
* <td>256</td>
* <td>64</td>
* </tr>
*
* </table><br>
* For smaller corpuses, reducing τ<sub>0</sub> can improve the performance (even down to τ<sub>0</sub> = 1)
* <br>
* See:<br>
* <ul>
* <li>Blei, D. M., Ng, A. Y.,&Jordan, M. I. (2003). <i>Latent Dirichlet
* Allocation</i>. Journal of Machine Learning Research, 3(4-5), 993–1022.
* doi:10.1162/jmlr.2003.3.4-5.993</li>
* <li>Hoffman, M., Blei, D.,&Bach, F. (2010). <i>Online Learning for Latent
* Dirichlet Allocation</i>. In Advances in Neural Information Processing
* Systems (pp. 856–864). Retrieved from
* <a href="http://videolectures.net/site/normal_dl/tag=83534/nips2010_1291.pdf">
* here</a></li>
* <li>Hoffman, M. D., Blei, D. M., Wang, C.,&Paisley, J. (2013).
* <i>Stochastic Variational Inference</i>. The Journal of Machine Learning
* Research, 14(1), 1303–1347.</li>
* <li>Hoffman, M. D. (2013). <i>Lazy updates for online LDA</i>. Retrieved from
* <a href="https://groups.yahoo.com/neo/groups/vowpal_wabbit/conversations/topics/250">here</a></li>
* </ul>
* @author Edward Raff
*/
public class OnlineLDAsvi implements Parameterized
{
private double alpha = 1;
private double eta = 1;
private double tau0 = 128;
private double kappa = 0.7;
private int epochs = 1;
private int D = -1;
private int K = -1;
private int W = -1;
private int miniBatchSize = 256;
private int t;
/**
* Creates a new Online LDA learner. The number of
* {@link #setK(int) topics}, expected number of
* {@link #setD(int) documents}, and the {@link #setVocabSize(int) vocabulary size}
* must be set before it can be used.
*/
public OnlineLDAsvi()
{
K = D = W = -1;
}
/**
* Creates a new Online LDA learner that is ready for online updates
* @param K the number of topics to learn
* @param D the expected number of documents to see
* @param W the vocabulary size
*/
public OnlineLDAsvi(int K, int D ,int W)
{
setK(K);
setD(D);
setVocabSize(W);
}
/*
     * Using lists instead of matrices because we want to use HDP-OnlineLDAsvi for
* when k is not specified <br>
* One row for each K topics, each vec is |W| long
* <br><br>
* Lambda is also used to determine if the rest of the structures need to be
     * re-initialized. When lambda is {@code null} the structures need to be
* reinitialized.
*/
private List<Vec> lambda;
private List<Lock> lambdaLocks;
/**
     * Used to store the sum of each vector in {@link #lambda}. Updated live to avoid unnecessary recomputation
*/
private DoubleList lambdaSums;
private int[] lastUsed;
private List<Vec> ELogBeta;//See equation 6 in 2010 paper
private List<Vec> ExpELogBeta;//See line 7 update in 2013 paper / equation (5) in 2010 paper
/**
* Holds the temp vector used to store gamma
*
* Gamma contains the per document update counterpart to {@link #lambda}.
*
* We need one gamma for each document, and each will have a value for all K
* topics.
*/
private ThreadLocal<Vec> gammaLocal;
/**
* Holds the temp vector used to store the expectation of {@link #gammaLocal}
*/
private ThreadLocal<Vec> logThetaLocal;
/**
* Holds the temp vector used to store the exponentiated expectation of
* {@link #logThetaLocal}
*/
private ThreadLocal<Vec> expLogThetaLocal;
/**
* Sets the number of topics that LDA will try to learn
* @param K the number of topics to learn
*/
public void setK(final int K)
{
if(K < 2)
throw new IllegalArgumentException("At least 2 topics must be learned");
this.K = K;
gammaLocal = new ThreadLocal<Vec>()
{
@Override
protected Vec initialValue()
{
return new DenseVector(K);
}
};
logThetaLocal = new ThreadLocal<Vec>()
{
@Override
protected Vec initialValue()
{
return new DenseVector(K);
}
};
expLogThetaLocal = new ThreadLocal<Vec>()
{
@Override
protected Vec initialValue()
{
return new DenseVector(K);
}
};
lambda = null;
}
/**
* Returns the number of topics to learn, or {@code -1} if <i>this</i>
* object is not ready to learn
* @return the number of topics that will be learned
*/
public int getK()
{
return K;
}
/**
* Sets the approximate number of documents that will be observed
* @param D the number of documents that will be observed
*/
public void setD(int D)
{
if(D < 1)
throw new IllegalArgumentException("The number of documents must be positive, not " + D);
this.D = D;
}
/**
* Returns the approximate number of documents that will be observed, or
* {@code -1} if <i>this</i> object is not ready to learn
* @return the number of documents that will be observed
*/
public int getD()
{
return D;
}
/**
* Sets the vocabulary size for LDA, which is the number of dimensions in
* the input feature vectors.
*
* @param W the vocabulary size for LDA
*/
public void setVocabSize(int W)
{
if(W < 1)
throw new IllegalArgumentException("Vocabulary size must be positive, not " + W);
this.W = W;
}
/**
* Returns the size of the vocabulary for LDA, or {@code -1} if <i>this</i>
* object is not ready to learn
* @return the vocabulary size for LDA
*/
public int getVocabSize()
{
return W;
}
/**
* Sets the prior for the on weight vector theta. 1/{@link #setK(int) K} is
* a common choice.
* @param alpha the positive prior value
*/
public void setAlpha(double alpha)
{
if(alpha <= 0 || Double.isInfinite(alpha) || Double.isNaN(alpha))
throw new IllegalArgumentException("Alpha must be a positive constant, not " + alpha);
this.alpha = alpha;
}
/**
*
* @return the weight vector prior over theta
*/
public double getAlpha()
{
return alpha;
}
/**
     * Sets the prior on topics. 1/{@link #setK(int) K} is a common choice.
* @param eta the positive prior for topics
*/
public void setEta(double eta)
{
if(eta <= 0 || Double.isInfinite(eta) || Double.isNaN(eta))
throw new IllegalArgumentException("Eta must be a positive constant, not " + eta);
this.eta = eta;
}
/**
*
* @return the topic prior
*/
public double getEta()
{
return eta;
}
/**
* A learning rate constant to control the influence of early iterations on
* the solution. Larger values reduce the influence of earlier iterations,
* smaller values increase the weight of earlier iterations.
* @param tau0 a learning rate parameter that must be greater than 0 (usually at least 1)
*/
public void setTau0(double tau0)
{
if(tau0 <= 0 || Double.isInfinite(tau0) || Double.isNaN(tau0))
throw new IllegalArgumentException("Eta must be a positive constant, not " + tau0);
this.tau0 = tau0;
}
/**
* Sets the number of training epochs when learning in a "batch" setting
* @param epochs the number of iterations to go over the data set
*/
public void setEpochs(int epochs)
{
this.epochs = epochs;
}
/**
* Returns the number of training iterations over the data set that will be
* used
* @return the number of training iterations over the data set that will be
* used
*/
public int getEpochs()
{
return epochs;
}
/**
* The "forgetfulness" factor in the learning rate. Larger values increase
* the rate at which old information is "forgotten"
* @param kappa the forgetfulness factor in [0.5, 1]
*/
public void setKappa(double kappa)
{
if(kappa < 0.5 || kappa > 1.0 || Double.isNaN(kappa))
throw new IllegalArgumentException("Kapp must be in [0.5, 1], not " + kappa);
this.kappa = kappa;
}
/**
*
* @return the forgetfulness factor
*/
public double getKappa()
{
return kappa;
}
/**
* Sets the number of data points used at a time to perform one update of
* the model parameters
* @param miniBatchSize the batch size to use
*/
public void setMiniBatchSize(int miniBatchSize)
{
if(miniBatchSize < 1)
throw new IllegalArgumentException("the batch size must be a positive constant, not " + miniBatchSize);
this.miniBatchSize = miniBatchSize;
}
/**
* Returns the topic vector for a given topic. The vector should not be
     * altered, and is scaled so that all term weights sum to one.
* @param k the topic to get the vector for
* @return the raw topic vector for the requested topic.
*/
public Vec getTopicVec(int k)
{
return new ScaledVector(1.0/lambda.get(k).sum(), lambda.get(k));
}
/**
     * From the 2013 paper, see the expectations in figure 5 on page 1323 and
     * equation (27) on page 1325.
     * See also equation 6 in the 2010 paper; figure 5 of the 2013 paper appears
     * to contain a typo.
* @param input the vector to take the input values from
* @param sum the sum of the {@code input} vector
* @param output the vector to store the transformed inputs in
*/
private void expandPsiMinusPsiSum(Vec input, double sum, Vec output)
{
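        //For θ ~ Dirichlet(γ), E[log θ_i] = ψ(γ_i) - ψ(Σ_j γ_j), where ψ is the digamma function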
double psiSum = FastMath.digamma(sum);
for(int i = 0; i < input.length(); i++)
output.set(i, FastMath.digamma(input.get(i))-psiSum);
}
/**
* Gets a sample from the exponential distribution. Implemented to be fast
* at the cost of accuracy
* @param lambdaInv the inverse of the lambda value that parameterizes the
* exponential distribution
* @param p the random value in [0, 1)
* @return a sample from the exponential distribution
*/
private static double sampleExpoDist(double lambdaInv, double p)
{
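        //Inverse-transform sampling: for p ~ Uniform[0,1), -log(1-p)/λ follows an Exponential(λ) distribution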
return -lambdaInv* FastMath.log(1-p);
}
/**
* Performs an update of the LDA topic distributions based on the given
* mini-batch of documents.
* @param docs the list of document vectors to update from
*/
public void update(List<Vec> docs)
{
update(docs, new FakeExecutor());
}
/**
* Performs an update of the LDA topic distribution based on the given
* mini-batch of documents.
* @param docs the list of document vectors to update from
* @param ex the source of threads for parallel execution
*/
public void update(final List<Vec> docs, ExecutorService ex)
{
//need to init structure?
if(lambda == null)
initialize();
/*
* Make sure the beta values we will need are up to date
*/
updateBetas(docs, ex);
/*
         * Note, on each update we don't modify or access lambda until the very
         * end - so we can interleave the "M" step into the final update
* accumulation to avoid temp space allocation and more easily exploit
* sparsity
*
*/
//2: Set the step-size schedule ρt appropriately.
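        //ρ_t = (τ0 + t)^(-κ); with κ in (0.5, 1] the steps satisfy Σρ_t = ∞ and Σρ_t² < ∞,
        //the usual conditions for convergence of the stochastic updates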
final double rho_t = Math.pow(tau0+(t++), -kappa);
        //pre-shrink the lambda values so we can add our updates later
for(int k = 0; k < K; k++)
{
lambda.get(k).mutableMultiply(1-rho_t);
lambdaSums.set(k, lambdaSums.getD(k)*(1-rho_t));
}
/*
* As described in the 2010 paper, this part is the "E" step if we view
* it as an EM algorithm
*/
//5: Initialize γ_dk =1, for k ∈ {1, . . . ,K}.
/*
* See note on page 3 from 2010 paper: "In practice, this algorithm
* converges to a better solution if we reinitialize γ and φ before
* each E step"
*/
final int P = SystemInfo.LogicalCores;
final CountDownLatch latch = new CountDownLatch(P);
        //main inner loop; the outer loop is per document and the inner-most is per-topic convergence
for(int id = 0; id < P; id++)
{
final int ID = id;
ex.submit(new Runnable()
{
@Override
public void run()
{
Random rand = RandomUtil.getRandom();
for(int d = ParallelUtils.getStartBlock(docs.size(), ID, P); d < ParallelUtils.getEndBlock(docs.size(), ID, P); d++)
{
final Vec doc = docs.get(d);
if(doc.nnz() == 0)
continue;
final Vec ELogTheta_d = logThetaLocal.get();
final Vec ExpELogTheta_d = expLogThetaLocal.get();
final Vec gamma_d = gammaLocal.get();
/*
* Make sure gamma and theta are set up and ready to start iterating
*/
prepareGammaTheta(gamma_d, ELogTheta_d, ExpELogTheta_d, rand);
int[] indexMap = new int[doc.nnz()];
double[] phiCols = new double[doc.nnz()];
                        //φ^k_dn ∝ exp{E[log θ_dk] + E[log β_k,w_dn]}, k ∈ {1, . . . , K}
computePhi(doc, indexMap, phiCols, K, gamma_d, ELogTheta_d, ExpELogTheta_d);
//accumulate updates, the "M" step
IntList toUpdate = new IntList(K);
ListUtils.addRange(toUpdate, 0, K, 1);
Collections.shuffle(toUpdate, rand);//helps reduce contention caused by shared iteration order
int updatePos = 0;
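                        //Visit the topics in random order using try-locks: if another thread
                        //holds topic k's lock, skip it for now and come back later, so threads
                        //rarely block while accumulating their updates into lambda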
while(!toUpdate.isEmpty())
{
int k = toUpdate.getI(updatePos);
if(lambdaLocks.get(k).tryLock())
{
final double coeff = ExpELogTheta_d.get(k)*rho_t*D/docs.size();
final Vec lambda_k = lambda.get(k);
final Vec ExpELogBeta_k = ExpELogBeta.get(k);
double lambdaSum_k = lambdaSums.getD(k);
/*
                             * iterate and increment ourselves so that we can also compute
                             * the new sums in one pass
*/
for(int i = 0; i < doc.nnz(); i++)
{
int indx = indexMap[i];
double toAdd = coeff*phiCols[i]*ExpELogBeta_k.get(indx);
lambda_k.increment(indx, toAdd);
lambdaSum_k += toAdd;
}
lambdaSums.set(k, lambdaSum_k);
lambdaLocks.get(k).unlock();
toUpdate.remove(updatePos);
}
if(!toUpdate.isEmpty())
updatePos = (updatePos+1) % toUpdate.size();
}
}
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex1)
{
Logger.getLogger(OnlineLDAsvi.class.getName()).log(Level.SEVERE, null, ex1);
}
}
/**
* Fits the LDA model against the given data set
* @param dataSet the data set to learn a topic model for
* @param topics the number of topics to learn
*/
public void model(DataSet dataSet, int topics)
{
model(dataSet, topics, new FakeExecutor());
}
/**
* Fits the LDA model against the given data set
* @param dataSet the data set to learn a topic model for
* @param topics the number of topics to learn
* @param ex the source of threads for parallel execution
*/
public void model(DataSet dataSet, int topics, ExecutorService ex)
{
if(ex == null)
ex = new FakeExecutor();
        //Use the same notation as the original paper
setK(topics);
setD(dataSet.size());
setVocabSize(dataSet.getNumNumericalVars());
final List<Vec> docs = dataSet.getDataVectors();
for(int epoch = 0; epoch < epochs; epoch++)
{
Collections.shuffle(docs);
for(int i = 0; i < D; i+=miniBatchSize)
{
int to = Math.min(i+miniBatchSize, D);
update(docs.subList(i, to), ex);
}
}
}
/**
* Computes the topic distribution for the given document.<br>
* Note that the returned vector will be dense, but many of the values may
* be very nearly zero.
*
* @param doc the document to find the topics for
* @return a vector of the topic distribution for the given document
*/
public Vec getTopics(Vec doc)
{
Vec gamma = new DenseVector(K);
Random rand = RandomUtil.getRandom();
double lambdaInv = (W * K) / (D * 100.0);
for (int j = 0; j < gamma.length(); j++)
gamma.set(j, sampleExpoDist(lambdaInv, rand.nextDouble()) + eta);
Vec eLogTheta_i = new DenseVector(K);
Vec expLogTheta_i = new DenseVector(K);
expandPsiMinusPsiSum(gamma, gamma.sum(), eLogTheta_i);
for (int j = 0; j < eLogTheta_i.length(); j++)
expLogTheta_i.set(j, FastMath.exp(eLogTheta_i.get(j)));
computePhi(doc, new int[doc.nnz()], new double[doc.nnz()], K, gamma, eLogTheta_i, expLogTheta_i);
gamma.mutableDivide(gamma.sum());
return gamma;
}
/**
* Updates the Beta vectors associated with the {@link #gammaLocal gamma}
* topic distributions so that they can be used to update against the given
* batch of documents. Once updated, the Betas are the only items needed to
* perform updates from the given batch, and the gamma values can be updated
* as the updates are computed.
*
* @param docs the mini batch of documents to update from
*/
private void updateBetas(final List<Vec> docs, ExecutorService ex)
{
final double[] digammaLambdaSum = new double[K];//TODO may want to move this out & reuse
for(int k = 0; k < K; k++)
digammaLambdaSum[k] = FastMath.digamma(W*eta+lambdaSums.getD(k));
List<List<Vec>> docSplits = ListUtils.splitList(docs, SystemInfo.LogicalCores);
final CountDownLatch latch = new CountDownLatch(docSplits.size());
for(final List<Vec> docsSub : docSplits)
{
ex.submit(new Runnable()
{
@Override
public void run()
{
                    for(Vec doc : docsSub)//make sure our ELogBeta is up to date
for(IndexValue iv : doc)
{
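                            //Lazily refresh the E[log β] and exp(E[log β]) entries: each word's
                            //column is recomputed at most once per time step t, and only for
                            //words that actually appear in the current mini-batch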
int indx = iv.getIndex();
if(lastUsed[indx] != t)
{
for(int k = 0; k < K; k++)
{
double lambda_kj = lambda.get(k).get(indx);
double logBeta_kj = FastMath.digamma(eta+lambda_kj)-digammaLambdaSum[k];
ELogBeta.get(k).set(indx, logBeta_kj);
ExpELogBeta.get(k).set(indx, FastMath.exp(logBeta_kj));
}
lastUsed[indx] = t;
}
}
latch.countDown();
}
});
}
try
{
latch.await();
}
catch (InterruptedException ex1)
{
Logger.getLogger(OnlineLDAsvi.class.getName()).log(Level.SEVERE, null, ex1);
}
}
/**
     * Prepares gamma and the associated theta expectations so that the
     * iterative updates to them can begin.
*
* @param gamma_i will be completely overwritten
* @param eLogTheta_i will be completely overwritten
* @param expLogTheta_i will be completely overwritten
* @param rand the source of randomness
*/
private void prepareGammaTheta(Vec gamma_i, Vec eLogTheta_i, Vec expLogTheta_i, Random rand)
{
final double lambdaInv = (W * K) / (D * 100.0);
for (int j = 0; j < gamma_i.length(); j++)
gamma_i.set(j, sampleExpoDist(lambdaInv, rand.nextDouble()) + eta);
expandPsiMinusPsiSum(gamma_i, gamma_i.sum(), eLogTheta_i);
for (int j = 0; j < eLogTheta_i.length(); j++)
expLogTheta_i.set(j, FastMath.exp(eLogTheta_i.get(j)));
}
/**
* Performs the main iteration to determine the topic distribution of the
* given document against the current model parameters. The non zero values
* of phi will be stored in {@code indexMap} and {@code phiCols}
*
* @param doc the document to get the topic assignments for
* @param indexMap the array of integers to store the non zero document
* indices in
* @param phiCols the array to store the normalized non zero values of phi
* in, where each value corresponds to the associated index in
* {@code indexMap}
* @param K the number of topics
* @param gamma_d the initial value of γ that will be altered to the topic assignments, but not normalized
* @param ELogTheta_d the expectation from γ per topic
* @param ExpELogTheta_d the exponentiated vector for {@code ELogTheta_d}
*/
private void computePhi(final Vec doc, int[] indexMap, double[] phiCols, int K, final Vec gamma_d, final Vec ELogTheta_d, final Vec ExpELogTheta_d)
{
        //φ^k_dn ∝ exp{E[log θ_dk] + E[log β_k,w_dn]}, k ∈ {1, . . . , K}
/*
         * we have the exp versions of each, and exp(log(x)+log(y)) = x*y,
         * so we can just use the dot product between the vectors per
         * document to get the normalization constant Z
         *
         * When we update γ we multiply by the word counts, so non-present words
         * have no impact. So we don't need ALL of the columns from φ, but
         * only the columns for which we have non-zero words.
*/
/*
* normalized for each topic column (len K) of the words in this doc.
         * We only need to concern ourselves with the non-zeros
         *
         * Because we need to run several update iterations, we will work with
         * the internal structure directly instead of using expensive
         * get/set on a SparseVector
*/
int pos = 0;
final SparseVector updateVec = new SparseVector(indexMap, phiCols, W, doc.nnz());
for(IndexValue iv : doc)
{
int wordIndex = iv.getIndex();
double sum = 0;
for(int i = 0; i < ExpELogTheta_d.length(); i++)
sum += ExpELogTheta_d.get(i)*ExpELogBeta.get(i).get(wordIndex);
indexMap[pos] = wordIndex;
phiCols[pos] = iv.getValue()/(sum+1e-15);
pos++;
}
        //iterate until convergence or we hit the arbitrary limit of 100 iterations (we don't usually see more than 70)
for(int iter = 0; iter < 100; iter++)
{
double meanAbsChange = 0;
double gamma_d_sum = 0;
            //γ_tk = α + Σ_w φ_twk n_tw
for(int k = 0; k < K; k++)
{
final double origGamma_dk = gamma_d.get(k);
double gamma_dtk = alpha;
gamma_dtk += ExpELogTheta_d.get(k) * updateVec.dot(ExpELogBeta.get(k));
gamma_d.set(k, gamma_dtk);
meanAbsChange += Math.abs(gamma_dtk-origGamma_dk);
gamma_d_sum += gamma_dtk;
}
            //update E_q[log θ_tk] and our exponentiated copy of it
expandPsiMinusPsiSum(gamma_d, gamma_d_sum, ELogTheta_d);
for(int i = 0; i < ELogTheta_d.length(); i++)
ExpELogTheta_d.set(i, FastMath.exp(ELogTheta_d.get(i)));
            //update our per-word normalization constants
int indx = 0;
for(IndexValue iv : doc)
{
int wordIndex = iv.getIndex();
double sum = 0;
for(int i = 0; i < ExpELogTheta_d.length(); i++)
sum += ExpELogTheta_d.get(i)*ExpELogBeta.get(i).get(wordIndex);
phiCols[indx] = iv.getValue() / (sum + 1e-15);
indx++;
}
/*
             * //original paper uses a tighter bound, but our approximation
             * isn't that good - and this seems to work well enough.
* 0.01 even seems to work, but need to try that more before
* switching
*/
if(meanAbsChange < 0.001*K)
break;
}
}
private void initialize()
{
if(K < 1)
throw new FailedToFitException("Topic number for LDA has not yet been specified");
else if(D < 1)
throw new FailedToFitException("Expected number of documents has not yet been specified");
else if(W < 1)
throw new FailedToFitException("Topic vocuabulary size has not yet been specified");
t = 0;
//1: Initialize λ(0) randomly
lambda = new ArrayList<Vec>(K);
lambdaLocks = new ArrayList<Lock>(K);
lambdaSums = new DoubleList(K);
ELogBeta = new ArrayList<Vec>(K);
ExpELogBeta = new ArrayList<Vec>(K);
lastUsed = new int[W];
Arrays.fill(lastUsed, -1);
final double lambdaInv = (K*W)/(D*100.0);
Random rand = RandomUtil.getRandom();
for(int i = 0; i < K; i++)
{
Vec lambda_i = new DenseVector(W);
lambda.add(new ScaledVector(lambda_i));
lambdaLocks.add(new ReentrantLock());
ELogBeta.add(new DenseVector(W));
ExpELogBeta.add(new DenseVector(W));
double rowSum = 0;
for(int j = 0; j < W; j++)
{
double sample = sampleExpoDist(lambdaInv, rand.nextDouble())+eta;
lambda_i.set(j, sample);
rowSum += sample;
}
lambdaSums.add(rowSum);
}
        //lambda has now been initialized, ELogBeta and ExpELogBeta will be initialized / updated lazily
}
}
| 29,530 | 34.795152 | 151 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/wordweighting/BinaryWordPresent.java | package jsat.text.wordweighting;
import java.util.List;
import jsat.linear.Vec;
/**
* Provides a simple binary representation of bag-of-word vectors by simply
* marking a value 1.0 if the token is present, and 0.0 if the token is not
* present. Nothing else is taken into account.<br>
* <br>
* This class does not require any state or configuration, so it can be used
* without calling {@link #setWeight(java.util.List, java.util.List) }.
*
*
* @author Edward Raff
*/
public class BinaryWordPresent implements WordWeighting
{
private static final long serialVersionUID = 5633647387188363706L;
@Override
public void setWeight(List<? extends Vec> allDocuments, List<Integer> df) {
//No work needed
}
@Override
public void applyTo(Vec vec)
{
vec.applyIndexFunction(this);
}
@Override
public double indexFunc(double value, int index)
{
if(index < 0)
return 0.0;
else if(value > 0.0)
return 1.0;
else
return 0.0;
}
}
| 1,060 | 22.577778 | 79 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/wordweighting/OkapiBM25.java | package jsat.text.wordweighting;
import java.util.List;
import jsat.linear.*;
/**
* Implements the <a href="http://en.wikipedia.org/wiki/Okapi_BM25">Okapi BM25
* </a> word weighting scheme.
*
 * @author Edward Raff
*/
public class OkapiBM25 implements WordWeighting
{
private static final long serialVersionUID = 6456657674702490465L;
private double k1;
private double b;
private double N;
private double docAvg;
/**
* Okapi document frequency is the number of documents that contain a term,
* not the number of times it occurs
*/
private int[] df;
/**
* Creates a new Okapi object
*/
public OkapiBM25()
{
this(1.5, 0.75);
}
/**
* Creates a new Okapi object
*
* @param k1 the non negative coefficient to apply to the term frequency
* @param b the coefficient to apply to the document length in the range [0,1]
*/
public OkapiBM25(double k1, double b)
{
if(Double.isNaN(k1) || Double.isInfinite(k1) || k1 < 0)
throw new IllegalArgumentException("coefficient k1 must be a non negative constant, not " + k1);
this.k1 = k1;
if(Double.isNaN(b) || b < 0 || b > 1)
throw new IllegalArgumentException("coefficient b must be in the range [0,1], not " + b);
this.b = b;
}
@Override
public void setWeight(List<? extends Vec> allDocuments, List<Integer> df)
{
this.df = new int[df.size()];
docAvg = 0;
for( Vec v : allDocuments)
{
for(IndexValue iv : v)
{
docAvg += iv.getValue();
this.df[iv.getIndex()]++;
}
}
N = allDocuments.size();
docAvg /= N;
}
@Override
public void applyTo(Vec vec)
{
if(df == null)
throw new RuntimeException("OkapiBM25 weightings haven't been initialized, setWeight method must be called before first use.");
double sum = vec.sum();
for(IndexValue iv : vec)
{
double value = iv.getValue();
int index = iv.getIndex();
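            //BM25 weight: idf = log((N - df + 0.5)/(df + 0.5)) and
            //score = idf * tf*(k1+1) / (tf + k1*(1 - b + b*docLen/avgDocLen)), where docLen is this document's summed counts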
double idf = Math.log( (N-df[index]+0.5)/(df[index]+0.5) );
double result = idf * (value*(k1+1))/(value+k1*(1-b+b*sum/docAvg));
vec.set(index, result);
}
}
@Override
public double indexFunc(double value, int index)
{
if (index < 0 || value == 0.0)
return 0.0;
return 0;
}
}
| 2,552 | 25.319588 | 139 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/wordweighting/TfIdf.java |
package jsat.text.wordweighting;
import static java.lang.Math.log;
import java.util.List;
import jsat.linear.Vec;
/**
* Applies Term Frequency Inverse Document Frequency (TF IDF) weighting to the
* word vectors.
*
* @author Edward Raff
*/
public class TfIdf implements WordWeighting
{
private static final long serialVersionUID = 5749882005002311735L;
public enum TermFrequencyWeight
{
/**
* BOOLEAN only takes into account whether or not the word is present in
* the document. <br>
* 1.0 if the count is non zero.
*/
BOOLEAN,
/**
* LOG returns a term weighting in [1, infinity) based on the log value
* of the term frequency<br>
* 1 + log(count)
*/
LOG,
/**
* DOC_NORMALIZED returns a term weighting in [0, 1] by normalizing the
* frequency by the most common word in the document. <br>
* count/(most Frequent word in document)
*
*/
DOC_NORMALIZED;
}
private double totalDocuments;
private List<Integer> df;
private double docMax = 0.0;
private TermFrequencyWeight tfWeighting;
/**
* Creates a new TF-IDF document weighting scheme that uses
* {@link TermFrequencyWeight#LOG LOG} weighting for term frequency.
*/
public TfIdf()
{
this(TermFrequencyWeight.LOG);
}
/**
* Creates a new TF-IDF document weighting scheme that uses the specified
* term frequency weighting
* @param tfWeighting the weighting method to use for the term frequency
* (tf) component
*/
public TfIdf(TermFrequencyWeight tfWeighting)
{
this.tfWeighting = tfWeighting;
}
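    /*
     * Illustrative usage sketch (not part of the original source). The corpus
     * construction below is an assumption for the example; any list of
     * bag-of-words count vectors with matching document-frequency counts works,
     * and the usual java.util and jsat.linear imports are assumed.
     *
     * List<Vec> corpus = dataSet.getDataVectors();//hypothetical bag-of-words data set
     * List<Integer> df = new ArrayList<>(Collections.nCopies(corpus.get(0).length(), 0));
     * for(Vec doc : corpus)
     *     for(IndexValue iv : doc)
     *         df.set(iv.getIndex(), df.get(iv.getIndex())+1);//count documents containing each word
     * TfIdf tfIdf = new TfIdf(TermFrequencyWeight.LOG);
     * tfIdf.setWeight(corpus, df);//must be called before applyTo
     * for(Vec doc : corpus)
     *     tfIdf.applyTo(doc);//re-weights each document's counts in place to tf-idf values
     */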
@Override
public void setWeight(List<? extends Vec> allDocuments, List<Integer> df)
{
this.totalDocuments = allDocuments.size();
this.df = df;
}
@Override
public double indexFunc(double value, int index)
{
if (index < 0 || value == 0.0)
return 0.0;
double tf;// = 1+log(value);
switch(tfWeighting)
{
case BOOLEAN:
tf = 1.0;
break;
case LOG:
tf = 1+log(value);
break;
case DOC_NORMALIZED:
tf = value/docMax;
break;
default:
tf = value;
}
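        //idf = log(total number of documents / number of documents containing this word); final weight is tf * idf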
double idf = log(totalDocuments / df.get(index));
return tf * idf;
}
@Override
public void applyTo(Vec vec)
{
if(df == null)
throw new RuntimeException("TF-IDF weightings haven't been initialized, setWeight method must be called before first use.");
if(tfWeighting == TermFrequencyWeight.DOC_NORMALIZED)
docMax = vec.max();
vec.applyIndexFunction(this);
}
}
| 2,891 | 25.290909 | 136 | java |
JSAT | JSAT-master/JSAT/src/jsat/text/wordweighting/WordCount.java | package jsat.text.wordweighting;
import java.util.List;
import jsat.linear.Vec;
/**
* Provides a simple representation of bag-of-word vectors by simply using the
* number of occurrences for a word in a document as the weight for said word.
* <br><br>
* This class does not require any state or configuration, so it can be used
* without calling {@link #setWeight(java.util.List, java.util.List) }.
*
* @author Edward Raff
*/
public class WordCount implements WordWeighting
{
private static final long serialVersionUID = 4665749166722300326L;
@Override
public void setWeight(List<? extends Vec> allDocuments, List<Integer> df)
{
//No work needed
}
@Override
public void applyTo(Vec vec)
{
vec.applyIndexFunction(this);
}
@Override
public double indexFunc(double value, int index)
{
if(index < 0)
return 0.0;
else if(value > 0.0)
return value;
else
return 0.0;
}
}
| 1,010 | 22.511628 | 78 | java |