Dataset columns:
repo: string, length 1 to 191
file: string, length 23 to 351
code: string, length 0 to 5.32M
file_length: int64, range 0 to 5.32M
avg_line_length: float64, range 0 to 2.9k
max_line_length: int64, range 0 to 288k
extension_type: string, 1 distinct value
JSAT
JSAT-master/JSAT/src/jsat/classifiers/bayesian/graphicalmodel/DirectedGraph.java
package jsat.classifiers.bayesian.graphicalmodel; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; /** * Provides a class representing a directed graph. Mutations to the graph should be done * exclusively through the methods provided by the class. Alterations should not be done to * the sets returned by any method. * * @author Edward Raff */ public class DirectedGraph<N> implements Cloneable { private static class Pair<A, B> { A incoming; B outgoing; public Pair(A first, B second) { this.incoming = first; this.outgoing = second; } public A getIncoming() { return incoming; } public B getOutgoing() { return outgoing; } @SuppressWarnings("unused") public void setIncoming(A first) { this.incoming = first; } @SuppressWarnings("unused") public void setOutgoing(B outgoing) { this.outgoing = outgoing; } @Override public boolean equals(Object obj) { if(obj == null || !(obj instanceof Pair)) return false; Pair other = (Pair) obj; return this.incoming.equals(other.incoming) && this.outgoing.equals(other.outgoing); } @Override public int hashCode() { int hash = 7; hash = 79 * hash + (this.incoming != null ? this.incoming.hashCode() : 0); hash = 79 * hash + (this.outgoing != null ? this.outgoing.hashCode() : 0); return hash; } } /** * Represents the nodes and all the edges. Each node N is mapped to its * paired adjacency lists. * <br><br> * The first list contains all the nodes that * point to N, and the second list contains all the nodes that N * points to. */ private Map<N, Pair<HashSet<N>, HashSet<N>>> nodes; public DirectedGraph() { nodes = new HashMap<N, Pair<HashSet<N>, HashSet<N>>>(); } /** * Returns the set of all nodes currently in the graph * @return the set of all nodes in the graph */ public Set<N> getNodes() { return nodes.keySet(); } /** * Adds all the objects in <tt>c</tt> as nodes in the graph * @param c a collection of nodes to add */ public void addNodes(Collection<? extends N> c) { for(N n : c) addNode(n); } /** * Adds a new node to the graph * @param node the object to make a node */ public void addNode(N node) { if(!nodes.containsKey(node)) nodes.put(node, new Pair<HashSet<N>, HashSet<N>>(new HashSet<N>(), new HashSet<N>())); } /** * Returns the set of all parents of the requested node, or null if the node does not exist in the graph * @param n the node to obtain the parents of * @return the set of parents, or null if the node is not in the graph */ public Set<N> getParents(N n) { Pair<HashSet<N>, HashSet<N>> p = nodes.get(n); if(p == null) return null; return p.getIncoming(); } /** * Returns the set of all children of the requested node, or null if the node does not exist in the graph. * @param n the node to obtain the children of * @return the set of children, or null if the node is not in the graph */ public Set<N> getChildren(N n) { Pair<HashSet<N>, HashSet<N>> p = nodes.get(n); if(p == null) return null; return p.getOutgoing(); } /** * Removes the specified node from the graph. If the node was not in the graph, no change occurs * @param node the node to remove from the graph */ public void removeNode(N node) { Pair<HashSet<N>, HashSet<N>> p = nodes.remove(node); if(p == null) return; //Outgoing edges we can ignore, removing the node drops them. 
//We need to avoid dangling incoming edges to this node we have removed HashSet<N> incomingNodes = p.getIncoming(); for(N incomingNode : incomingNodes) nodes.get(incomingNode).getOutgoing().remove(node); } /** * Returns true if both <tt>a</tt> and <tt>b</tt> are nodes in the graph * @param a the first value to check for * @param b the second value to check for * @return true if both <tt>a</tt> and <tt>b</tt> are in the graph, false otherwise */ private boolean containsBoth(N a, N b) { return nodes.containsKey(a) && nodes.containsKey(b); } /** * Adds a directed edge into the network from <tt>a</tt> to <tt>b</tt>. * If <tt>a</tt> and <tt>b</tt> are not nodes in the graph, nothing occurs. * @param a the parent node * @param b the child node */ public void addEdge(N a, N b) { if( !containsBoth(a, b) ) return;//Can't add an edge between nodes that don't exist nodes.get(a).getOutgoing().add(b); nodes.get(b).getIncoming().add(a); } /** * Removes a directed edge from the network connecting <tt>a</tt> to <tt>b</tt>. * If <tt>a</tt> and <tt>b</tt> are not nodes in the graph, nothing occurs. * @param a the parent node * @param b the child node */ public void removeEdge(N a, N b) { if(!containsBoth(a, b)) return; nodes.get(a).getOutgoing().remove(b); nodes.get(b).getIncoming().remove(a); } /** * Returns <tt>true</tt> if <tt>a</tt> is a node in the graph, or <tt>false</tt> otherwise. * @param a the node in question * @return <tt>true</tt> if the node exists, <tt>false</tt> otherwise */ public boolean containsNode(N a) { return nodes.containsKey(a); } @Override protected DirectedGraph<N> clone() { DirectedGraph<N> clone = new DirectedGraph<N>(); clone.addNodes(this.nodes.keySet()); for(N key : nodes.keySet()) { Pair<HashSet<N>, HashSet<N>> p = nodes.get(key); for(N n : p.getIncoming()) clone.nodes.get(key).getIncoming().add(n); for(N n : p.getOutgoing()) clone.nodes.get(key).getOutgoing().add(n); } return clone; } }
6,618
27.530172
137
java
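A minimal usage sketch for the DirectedGraph class above, outside of any Bayesian network. The integer node values are arbitrary and chosen only for illustration.

import jsat.classifiers.bayesian.graphicalmodel.DirectedGraph;

public class DirectedGraphSketch
{
    public static void main(String[] args)
    {
        DirectedGraph<Integer> g = new DirectedGraph<Integer>();
        for (int node = 0; node < 3; node++)
            g.addNode(node);
        g.addEdge(0, 1); // 0 becomes a parent of 1
        g.addEdge(0, 2); // 0 becomes a parent of 2
        System.out.println(g.getChildren(0)); // the children of 0, e.g. [1, 2]
        System.out.println(g.getParents(1));  // the parents of 1, e.g. [0]
        g.removeNode(0); // incoming and outgoing edges of node 0 are cleaned up
        System.out.println(g.getParents(1));  // now an empty set
    }
}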
JSAT
JSAT-master/JSAT/src/jsat/classifiers/bayesian/graphicalmodel/DiscreteBayesNetwork.java
package jsat.classifiers.bayesian.graphicalmodel; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutorService; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.DataPointPair; import jsat.classifiers.bayesian.ConditionalProbabilityTable; import jsat.classifiers.bayesian.NaiveBayes; import jsat.exceptions.FailedToFitException; import jsat.utils.IntSet; import static java.lang.Math.*; /** * A class for representing a Baysian Network (BN) for discrete variables. A BN use a graph to representing * the relations between variables, and these links are called the structure. The structure of a BN must be * specified by an expert using the {@link #depends(int, int) } method. The target class should be specified * as the parent of the variables which have a causal relationship to it. These children of the target class * should then have their own children specified. Once the structure has been specified, the network can be * trained and used for classification. <br> * If the network structure has not been specified, or has no relationships for the target class, the BN will * create an edge from the target class to every variable. If no edges were ever specified, this initialization * of edges corresponds to a {@link NaiveBayes} implementation. * * @author Edward Raff */ public class DiscreteBayesNetwork implements Classifier { private static final long serialVersionUID = 2980734594356260141L; /** * The directed Graph that represents this BN */ protected DirectedGraph<Integer> dag; /** * The Conditional probability tables for each variable */ protected Map<Integer, ConditionalProbabilityTable> cpts; /** * The class we are predicting */ protected CategoricalData predicting; /** * The prior probabilities of each class value */ protected double[] priors; private boolean usePriors = DEFAULT_USE_PRIORS; /** * Whether or not the classifier should take into account the prior probabilities. Default value is {@value #DEFAULT_USE_PRIORS}. */ public static final boolean DEFAULT_USE_PRIORS = true; public DiscreteBayesNetwork() { dag = new DirectedGraph<Integer>(); } public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(predicting.getNumOfCategories()); int classId = data.numCategoricalValues(); //Use log proababilities to avoid underflow double logPSum = 0; double[] logProbs = new double[cr.size()]; for(int i = 0; i < cr.size(); i++) { DataPointPair<Integer> dpp = new DataPointPair<Integer>(data, i); for(int classParent : dag.getChildren(classId)) logProbs[i] += log(cpts.get(classParent).query(classParent, dpp)); if(usePriors) logProbs[i] += log(priors[i]); logPSum += logProbs[i]; } for(int i = 0; i < cr.size(); i++) cr.setProb(i, exp(logProbs[i]-logPSum)); return cr; } /** * Adds a dependency relation ship between two variables that will be in the network. The integer value corresponds * the the index of the i'th categorical variable, where the class target's value is the number of categorical variables. * * @param parent the parent variable, which will be explained in part by the child * @param child the child variable, which contributes to the conditional probability of the parent. 
*/ public void depends(int parent, int child) { dag.addNode(child); dag.addNode(parent); dag.addEdge(parent, child); } public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } public void train(ClassificationDataSet dataSet) { int classID = dataSet.getNumCategoricalVars(); if(classID == 0) throw new FailedToFitException("Network needs categorical attributes to work"); predicting = dataSet.getPredicting(); priors = dataSet.getPriors(); cpts = new HashMap<Integer, ConditionalProbabilityTable>(); Set<Integer> cptTrainSet = new IntSet(); if(dag.getNodes().isEmpty()) { for(int i = 0; i < classID; i++) depends(classID, i); } for(int classParent : dag.getChildren(classID)) { Set<Integer> depends = dag.getChildren(classParent); ConditionalProbabilityTable cpt = new ConditionalProbabilityTable(); cptTrainSet.clear(); cptTrainSet.addAll(depends); cptTrainSet.add(classParent); cptTrainSet.add(classID); cpt.trainC(dataSet, cptTrainSet); cpts.put(classParent, cpt); } } public boolean supportsWeightedData() { return false; } @Override public Classifier clone() { throw new UnsupportedOperationException("Not supported yet."); } }
5,367
34.084967
134
java
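A small sketch of how the network above might be wired up by hand, assuming a pre-loaded ClassificationDataSet named data whose features are all categorical; the feature indices 0, 1, and 2 are hypothetical.

import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.bayesian.graphicalmodel.DiscreteBayesNetwork;

public class BayesNetSketch
{
    // 'data' is assumed to be a pre-loaded, purely categorical data set
    public static CategoricalResults fitAndQuery(ClassificationDataSet data)
    {
        int classId = data.getNumCategoricalVars(); // the target's node index, per the Javadoc above
        DiscreteBayesNetwork bn = new DiscreteBayesNetwork();
        bn.depends(classId, 0); // the class is a parent of (hypothetical) feature 0
        bn.depends(classId, 1); // and of feature 1
        bn.depends(1, 2);       // feature 1 in turn explains feature 2
        bn.train(data);
        return bn.classify(data.getDataPoint(0));
    }
}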
JSAT
JSAT-master/JSAT/src/jsat/classifiers/bayesian/graphicalmodel/K2NetworkLearner.java
package jsat.classifiers.bayesian.graphicalmodel; import java.util.Set; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.DataPoint; import jsat.utils.IntList; import jsat.utils.IntSet; import jsat.utils.ListUtils; import static java.lang.Math.*; import static jsat.math.SpecialMath.*; /** * An implementation of the K2 algorithm for learning the structure of a Bayesian Network. When trained, * if no network has been specified, the K2 algorithm will attempt to learn a network structure. The * network structure can also be learned by calling {@link #learnNetwork(jsat.classifiers.ClassificationDataSet) } directly. * <br><br> * Note that the K2 algorithm attempts to learn a whole network structure, and may learn things that are not relevant * for the classification task. K2 often does not provide satisfactory results for classification. * <br><br> * See: <i>A Bayesian method for the induction of probabilistic networks from data</i>. Gregory F. Cooper and Edward Herskovits. * @author Edward Raff */ public class K2NetworkLearner extends DiscreteBayesNetwork { private static final long serialVersionUID = -9681177007308829L; public K2NetworkLearner() { super(); } /** * list of all possible values of the attribute xi */ private int[] ri; private int maxParents; /** * Sets the maximum number of parents to allow a node when learning the network structure. If a non-positive value is supplied, nodes will be allowed any number of parents. * @param maxParents sets the maximum number of parents a node may learn */ public void setMaxParents(int maxParents) { this.maxParents = maxParents; } /** * Returns the maximum number of parents allowed when learning a network structure, or zero if any number of parents are valid. * @return the maximum number of parents a node may learn */ public int getMaxParents() { return max(maxParents, 0); } /** * Learns the network structure from the given data set. * @param D the data set to learn the network from */ public void learnNetwork(ClassificationDataSet D) { IntList varOrder = new IntList(D.getNumCategoricalVars()+1); varOrder.add(D.getNumCategoricalVars());//Classification target will be evaluated first ListUtils.addRange(varOrder, 0, D.getNumCategoricalVars(), 1); ri = new int[varOrder.size()]; for(int i : varOrder) if(i == D.getNumCategoricalVars()) ri[i] = D.getClassSize(); else ri[i] = D.getCategories()[i].getNumOfCategories(); int u = maxParents; if(u <= 0) u = ri.length; /** * Stores the set of variables preceding the current one being evaluated */ Set<Integer> preceding = new IntSet(); for(int i : varOrder)//Loop over the variables in the intended order { Set<Integer> pi = new IntSet();//The current parents of variable i double pOld = f(i, pi, D); boolean OKToProceed = true; Set<Integer> candidates = new IntSet(preceding); while(OKToProceed && pi.size() < u) { if(candidates.isEmpty()) break;//Break out of the loop, no candidates left. 
//Best candidate solution double pNew = Double.NEGATIVE_INFINITY; //The best candidate int z = -1; candidates.removeAll(pi); //Find the variable that maximizes our gain for(int candidate : candidates) { pi.add(candidate); double tmp = f(i, pi, D); if(tmp > pNew) { pNew = tmp; z = candidate; } pi.remove(candidate); } if(pNew > pOld) { pOld = pNew; pi.add(z); } else OKToProceed = false; } for(int parrent : pi) depends(parrent, i); preceding.add(i); } ri = null; } @Override public void train(ClassificationDataSet dataSet) { if(dag.getNodes().isEmpty() || dag.getParents(dataSet.getNumCategoricalVars()).isEmpty()) learnNetwork(dataSet); super.train(dataSet); } /** * Queries the data set for the number of instances that have each possible combination of values. * <tt>classes</tT> and <tt>values</tt> should have the same length. Each value in classes * corresponds to the target value specified in <tt>values</tt>. We will return the number * of data points that satisfy all class value pairs * * * @param classes the classes to check * @param values the values to check for * @param D the data set to search * @return the number of times the value constraints are satisfied in the data set */ private double query(int[] classes, int[] values, ClassificationDataSet D) { double count = 1; for(int i = 0; i < D.size(); i++) { DataPoint dp = D.getDataPoint(i); //Use j to break early (set value) or indicate success (j == classes.length) int j; for(j = 0; j < classes.length; j++) { if(classes[j] == D.getNumCategoricalVars())//Special case { if(D.getDataPointCategory(i) != values[j]) j = classes.length+1; } else if(dp.getCategoricalValue(j) != values[j]) j = classes.length+1; } if(j == classes.length) count+=D.getWeight(i); } return count; } public double f(int i, Set<Integer> pi, ClassificationDataSet D) { double term2 = 0.0; double Nijk = 0.0; if(pi.isEmpty())//Special case { int[] classes = new int[] {i}; int[] values = new int[1]; for(int k = 0; k < ri[i]; k++) { values[0] = k; double count = query(classes, values, D); Nijk += count; term2 += lnGamma(count+1); } return ((lnGamma(ri[i]) - lnGamma(Nijk + ri[i])) + term2); } double fullProduct = 0.0; //General case int[] classes = new int[pi.size()+1]; int[] values = new int[pi.size()+1]; int c = 0; for(int clas : pi) classes[c++] = clas; classes[c] = i;//Last one is the one we are currently evaluating //Default all values to zero, which is fine //We need to compute the sum for ever possible combination of class values. //We do this by incrementing the values array and breaking out when we get to an invalid state while(true) { term2 = Nijk = 0.0; for(int k = 0; k < ri[i]; k++) { values[pi.size()] = k; double count = query(classes, values, D); Nijk += count; term2 += lnGamma(count+1); } fullProduct += (lnGamma(ri[i]) - lnGamma(Nijk + ri[i])) + term2; //Increment the variable count int pos = 0; values[pos]++; values[pi.size()] = 0;//We set this to zero. If its value is non zero after this loop, //then all values have been iterated past their max value, they have rolled //over to all zeros, and we are done while(values[pos] >= ri[classes[pos]] && pos < pi.size()) { values[pos++] = 0; values[pos]++; } if(values[pi.size()] != 0) break; } return (fullProduct); } }
8,410
33.052632
173
java
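A sketch of letting K2 discover the structure instead of specifying it; the data set is again an assumed pre-loaded categorical ClassificationDataSet, and the parent cap of 2 is an arbitrary choice.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.bayesian.graphicalmodel.K2NetworkLearner;

public class K2Sketch
{
    // 'data' is assumed to be a pre-loaded, purely categorical data set
    public static K2NetworkLearner learn(ClassificationDataSet data)
    {
        K2NetworkLearner k2 = new K2NetworkLearner();
        k2.setMaxParents(2); // cap each node at two parents; zero or less means no limit
        k2.train(data);      // no structure was given, so train() first calls learnNetwork(data)
        return k2;
    }
}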
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/AdaBoostM1.java
package jsat.classifiers.boosting; import java.util.ArrayList; import java.util.Collections; import java.util.List; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.OneVSAll; import jsat.exceptions.FailedToFitException; import jsat.linear.Vec; import jsat.parameters.Parameterized; import jsat.utils.DoubleList; /** * Implementation of Experiments with a New Boosting Algorithm, by Yoav Freund&amp;Robert E. Schapire. * <br> * This is the first AdaBoost algorithm presented in the paper, and the first boosting algorithm. * Though not often mentioned, AdaBoost does support non binary classification tasks. However, * for any <i>k</i> labels, the weak learner's error still needs to be better then 1/2, which * is not an easy requirement to satisfy. For this reason, many use AdaBoostM1 by reducing * <i>k</i> class classification problems to several 2 class problems. * <br><br> * Many Boosting methods, when given a binary classification task, reduce to having the same results as this class. * <br> <br> * AdaBoost is often combined with {@link OneVSAll} to obtain better classification accuracy. * * * @author Edward Raff */ public class AdaBoostM1 implements Classifier, Parameterized { private static final long serialVersionUID = 4205232097748332861L; private Classifier weakLearner; private int maxIterations; /** * The list of weak hypothesis */ protected List<Classifier> hypoths; /** * The weights for each weak learner */ protected List<Double> hypWeights; protected CategoricalData predicting; public AdaBoostM1(Classifier weakLearner, int maxIterations) { setWeakLearner(weakLearner); this.maxIterations = maxIterations; } public AdaBoostM1(AdaBoostM1 toCopy) { this(toCopy.weakLearner.clone(), toCopy.maxIterations); if(toCopy.hypWeights != null) this.hypWeights = new DoubleList(toCopy.hypWeights); if(toCopy.hypoths != null) { this.hypoths = new ArrayList<>(toCopy.hypoths.size()); for(int i = 0; i < toCopy.hypoths.size(); i++) this.hypoths.add(toCopy.hypoths.get(i).clone()); } if(toCopy.predicting != null) this.predicting = toCopy.predicting.clone(); } /** * Returns the maximum number of iterations used * @return the maximum number of iterations used */ public int getMaxIterations() { return maxIterations; } /** * * @return a list of the models that are in this ensemble. */ public List<Classifier> getModels() { return Collections.unmodifiableList(hypoths); } /** * * @return a list of the models weights that are in this ensemble. */ public List<Double> getModelWeights() { return Collections.unmodifiableList(hypWeights); } /** * Sets the maximal number of boosting iterations that may be performed * @param maxIterations the maximum number of iterations */ public void setMaxIterations(int maxIterations) { if(maxIterations < 1) throw new IllegalArgumentException("Number of iterations must be a positive value, no " + maxIterations); this.maxIterations = maxIterations; } /** * Returns the weak learner currently being used by this method. * @return the weak learner currently being used by this method. */ public Classifier getWeakLearner() { return weakLearner; } /** * Sets the weak learner used during training. 
* @param weakLearner the weak learner to use */ public void setWeakLearner(Classifier weakLearner) { if(!weakLearner.supportsWeightedData()) throw new FailedToFitException("WeakLearner must support weighted data to be boosted"); this.weakLearner = weakLearner; } @Override public CategoricalResults classify(DataPoint data) { if(predicting == null) throw new RuntimeException("Classifier has not been trained yet"); CategoricalResults cr = new CategoricalResults(predicting.getNumOfCategories()); for(int i=0; i < hypoths.size(); i++) cr.incProb(hypoths.get(i).classify(data).mostLikely(), hypWeights.get(i)); cr.normalize(); return cr; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { /* * Implementation note: We want all weights to be >= 1, so we will scale all weight values by the smallest weight value */ predicting = dataSet.getPredicting(); hypWeights = new DoubleList(maxIterations); hypoths = new ArrayList<>(maxIterations); Vec origWeights = dataSet.getDataWeights(); //Initialization step, set up the weights so they are all 1 / size of dataset for(int i = 0; i < dataSet.size(); i++) dataSet.setWeight(i, 1.0); double scaledBy = dataSet.size(); //Rather then reclasify points, we just save this list boolean[] wasCorrect = new boolean[dataSet.size()]; for(int t = 0; t < maxIterations; t++) { weakLearner.train(dataSet, parallel); double error = 0.0; for(int i = 0; i < dataSet.size(); i++) if( !(wasCorrect[i] = weakLearner.classify(dataSet.getDataPoint(i)).mostLikely() == dataSet.getDataPointCategory(i)) ) error += dataSet.getWeight(i); error /= scaledBy; if(error > 0.5 || error == 0.0) return; double bt = error /( 1.0 - error ); //Update Distribution weights double Zt = 0.0; double newScale = scaledBy;//Not scaled for(int i = 0; i < wasCorrect.length; i++) { DataPoint dp = dataSet.getDataPoint(i); if(wasCorrect[i])//Put less weight on the points we got correct { double w = dataSet.getWeight(i)*bt; dataSet.setWeight(i, w); } double trueWeight = dataSet.getWeight(i)/scaledBy; if(1.0/trueWeight > newScale) newScale = 1.0/trueWeight; Zt += dataSet.getWeight(i)/scaledBy;//Sum the values } for(int i = 0; i < dataSet.size(); i++)//Normalize so the weights make a distribution dataSet.setWeight(i, dataSet.getWeight(i)/scaledBy*newScale/Zt); scaledBy = newScale; hypoths.add(weakLearner.clone()); hypWeights.add(Math.log(1/bt)); } for(int i = 0; i < dataSet.size(); i++) dataSet.setWeight(i, origWeights.get(i)); } @Override public boolean supportsWeightedData() { return false; } @Override public AdaBoostM1 clone() { return new AdaBoostM1(this); } }
7,301
31.891892
134
java
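A hedged example of boosting a weak learner with the AdaBoostM1 class above, assuming pre-loaded train and test sets with the same schema. The DecisionTree depth of 3 and the 100 iterations are arbitrary choices; the repository itself favors shallow trees as weak learners.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.boosting.AdaBoostM1;
import jsat.classifiers.trees.DecisionTree;

public class AdaBoostSketch
{
    // 'train' and 'test' are assumed to be pre-loaded data sets with the same schema
    public static double errorRate(ClassificationDataSet train, ClassificationDataSet test)
    {
        AdaBoostM1 boost = new AdaBoostM1(new DecisionTree(3), 100); // shallow trees as the weak learner
        boost.train(train, false); // single threaded; pass true to train the weak learner in parallel
        int wrong = 0;
        for (int i = 0; i < test.size(); i++)
            if (boost.classify(test.getDataPoint(i)).mostLikely() != test.getDataPointCategory(i))
                wrong++;
        return wrong / (double) test.size();
    }
}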
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/ArcX4.java
package jsat.classifiers.boosting; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.parameters.Parameterized; import jsat.utils.concurrent.ParallelUtils; /** * Arc-x4 is a ensemble-classifier that performs re-weighting of the data points * based on the total number of errors that have occurred for the data point. * <br><br> * See: Breiman, L. (1998). <i>Arcing Classifiers</i>. The Annals of Statistics, * 26(3), 801–824. * * @author Edward Raff */ public class ArcX4 implements Classifier, Parameterized { private static final long serialVersionUID = 3831448932874147550L; private Classifier weakLearner; private int iterations; private double coef = 1; private double expo = 4; private CategoricalData predicing; private Classifier[] hypoths; /** * Creates a new Arc-X4 classifier * * @param weakLearner the weak learner to use * @param iterations the number of iterations to perform */ public ArcX4(Classifier weakLearner, int iterations) { setWeakLearner(weakLearner); setIterations(iterations); } /** * Sets the weak learner used at each iteration of learning * @param weakLearner the weak learner to use */ public void setWeakLearner(Classifier weakLearner) { if(!weakLearner.supportsWeightedData()) throw new RuntimeException("Weak learners must support weighted data samples"); this.weakLearner = weakLearner; } /** * Returns the weak learner used * @return the weak learner used */ public Classifier getWeakLearner() { return weakLearner; } /** * Sets the number of iterations to perform * @param iterations the number of iterations to do */ public void setIterations(int iterations) { this.iterations = iterations; } /** * Returns the number of iterations to learn * @return the number of iterations to learn */ public int getIterations() { return iterations; } /** * Weights are updated as 1+coef*errors<sup>expo</sup>. This sets the * coefficient used to update the errors * * @param coef the multiplicative factor on the errors in weight construction */ public void setCoefficient(double coef) { if(coef <= 0 || Double.isInfinite(coef) || Double.isNaN(coef)) throw new ArithmeticException("The coefficient must be a positive constant"); this.coef = coef; } /** * Returns the coefficient use when re-weighting * @return the coefficient use when re-weighting */ public double getCoefficient() { return coef; } /** * Weights are updated as 1+coef*errors<sup>expo</sup>. 
This sets the * exponent used to update the errors * @param expo the exponent to use */ public void setExponent(double expo) { if(expo <= 0 || Double.isInfinite(expo) || Double.isNaN(expo)) throw new ArithmeticException("The exponent must be a positive constant"); this.expo = expo; } /** * Returns the exponent used when re-weighting * @return the exponent used when re-weighting */ public double getExponent() { return expo; } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(predicing.getNumOfCategories()); for(Classifier hypoth : hypoths) cr.incProb(hypoth.classify(data).mostLikely(), 1.0); cr.normalize(); return cr; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { //Create a low memory clone that only has different dataPoint Objects to save space ClassificationDataSet cds = dataSet.shallowClone(); //Everyone starts with no errors int[] errors = new int[cds.size()]; hypoths = new Classifier[iterations]; for(int t = 0; t < hypoths.length; t++) { for(int i = 0; i < cds.size(); i++) cds.setWeight(i, 1+coef*Math.pow(errors[i], expo)); Classifier hypoth = weakLearner.clone(); hypoth.train(cds, parallel); hypoths[t] = hypoth; ParallelUtils.run(parallel, errors.length, (start, end) -> { for(int i = start; i < end; i++) if(hypoth.classify(cds.getDataPoint(i)).mostLikely() != cds.getDataPointCategory(i)) errors[i]++; }); } this.predicing = cds.getPredicting(); } @Override public boolean supportsWeightedData() { return false; } @Override public ArcX4 clone() { ArcX4 clone = new ArcX4(weakLearner.clone(), iterations); clone.coef = this.coef; clone.expo = this.expo; if(this.predicing != null) clone.predicing = this.predicing.clone(); if(this.hypoths != null) { clone.hypoths = new Classifier[this.hypoths.length]; for(int i = 0; i < clone.hypoths.length; i++) clone.hypoths[i] = this.hypoths[i].clone(); } return clone; } }
5,586
27.505102
104
java
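A brief sketch of configuring Arc-x4 over an assumed pre-loaded training set; the coefficient and exponent shown simply restate the class defaults, and the tree depth and round count are arbitrary.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.boosting.ArcX4;
import jsat.classifiers.trees.DecisionTree;

public class ArcX4Sketch
{
    // 'data' is an assumed pre-loaded training set
    public static ArcX4 fit(ClassificationDataSet data)
    {
        ArcX4 arc = new ArcX4(new DecisionTree(4), 50); // 50 rounds of re-weighted training
        arc.setCoefficient(1.0); // weights become 1 + coef * errors^expo
        arc.setExponent(4.0);    // the classic Arc-x4 setting
        arc.train(data, true);   // true lets each round classify the data in parallel
        return arc;
    }
}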
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/Bagging.java
package jsat.classifiers.boosting; import java.util.*; import java.util.concurrent.*; import java.util.logging.Level; import java.util.logging.Logger; import jsat.classifiers.*; import jsat.classifiers.knn.NearestNeighbour; import jsat.classifiers.trees.DecisionTree; import jsat.math.OnLineStatistics; import jsat.parameters.*; import jsat.regression.RegressionDataSet; import jsat.regression.Regressor; import jsat.utils.SystemInfo; import jsat.utils.concurrent.ParallelUtils; /** * An implementation of Bootstrap Aggregating, as described by LEO BREIMAN in "Bagging Predictors". <br> * <br> * Bagging is an ensemble learner, it takes a weak learner and trains several to create a better over result. * Bagging is particularly useful when the base classifier has some amount of predictive power, but is hindered * by variance in the output (small change in input causes large change in output), or variances in input * (handles noisy data badly or is has a brittle learning algorithm). It is common to perform bagging on * {@link DecisionTree Decision Trees}, because they meet these strengths and weaknesses. <br> * Bagging produces little to no improvement when using learners that have low variance and robust learning * methods. {@link NearestNeighbour} is an example of a particularly bad method to bag. * <br> * Bagging has many similarities to boosting. * @author Edward Raff */ public class Bagging implements Classifier, Regressor, Parameterized { private static final long serialVersionUID = -6566453570170428838L; private Classifier baseClassifier; private Regressor baseRegressor; private CategoricalData predicting; private int extraSamples; private int rounds; private boolean simultaniousTraining; private Random random; private List learners; /** * The number of rounds of bagging that will be used by default in the constructor: {@value #DEFAULT_ROUNDS} */ public static final int DEFAULT_ROUNDS = 20; /** * The number of extra samples to take when bagging in each round used by default in the constructor: {@value #DEFAULT_EXTRA_SAMPLES} */ public static final int DEFAULT_EXTRA_SAMPLES = 0; /** * The default behavior for parallel training, as specified by {@link #setSimultaniousTraining(boolean) } is {@value #DEFAULT_SIMULTANIOUS_TRAINING} */ public static final boolean DEFAULT_SIMULTANIOUS_TRAINING = true; /** * Creates a new Bagger for classification. This can not be changed after construction. * * @param baseClassifier the base learner to use. */ public Bagging(Classifier baseClassifier) { this(baseClassifier, DEFAULT_EXTRA_SAMPLES, DEFAULT_SIMULTANIOUS_TRAINING); } /** * Creates a new Bagger for classification. This can not be changed after construction. * * @param baseClassifier the base learner to use. * @param extraSamples how many extra samples past the training size to take * @param simultaniousTraining controls whether base learners are trained sequentially or simultaneously */ public Bagging(Classifier baseClassifier, int extraSamples, boolean simultaniousTraining) { this(baseClassifier, extraSamples, simultaniousTraining, DEFAULT_ROUNDS, new Random(1)); } /** * Creates a new Bagger for classification. This can not be changed after construction. * * @param baseClassifier the base learner to use. * @param extraSamples how many extra samples past the training size to take * @param simultaniousTraining controls whether base learners are trained sequentially or simultaneously * @param rounds how many rounds of bagging to perform. 
* @param random the source of randomness for sampling */ public Bagging(Classifier baseClassifier, int extraSamples, boolean simultaniousTraining, int rounds, Random random) { this(extraSamples, simultaniousTraining, rounds, random); this.baseClassifier = baseClassifier; } /** * Creates a new Bagger for regression. This can not be changed after construction. * * @param baseRegressor the base learner to use. */ public Bagging(Regressor baseRegressor) { this(baseRegressor, DEFAULT_EXTRA_SAMPLES, DEFAULT_SIMULTANIOUS_TRAINING); } /** * Creates a new Bagger for regression. This can not be changed after construction. * * @param baseRegressor the base learner to use. * @param extraSamples how many extra samples past the training size to take * @param simultaniousTraining controls whether base learners are trained sequentially or simultaneously */ public Bagging(Regressor baseRegressor, int extraSamples, boolean simultaniousTraining) { this(baseRegressor, extraSamples, simultaniousTraining, DEFAULT_ROUNDS, new Random(1)); } /** * Creates a new Bagger for regression. This can not be changed after construction. * * @param baseRegressor the base learner to use. * @param extraSamples how many extra samples past the training size to take * @param simultaniousTraining controls whether base learners are trained sequentially or simultaneously * @param rounds how many rounds of bagging to perform. * @param random the source of randomness for sampling */ public Bagging(Regressor baseRegressor, int extraSamples, boolean simultaniousTraining, int rounds, Random random) { this(extraSamples, simultaniousTraining, rounds, random); this.baseRegressor = baseRegressor; } //For internal use private Bagging(int extraSamples, boolean simultaniousTraining, int rounds, Random random) { setExtraSamples(extraSamples); setSimultaniousTraining(simultaniousTraining); setRounds(rounds); this.random = random; } /** * Bagging samples from the training set with replacement, and draws a sampleWithReplacement at least as large * as the training set. This controls how many extra samples are taken. If negative, fewer * samples will be taken. Using negative values is not recommended. * * @param i how many extra samples to take */ public void setExtraSamples(int i) { extraSamples = i; } public int getExtraSamples() { return extraSamples; } /** * Sets the number of rounds that bagging is done, meaning how many base learners are trained * @param rounds the number of base learners to train * @throws ArithmeticException if the number specified is not a positive value */ public void setRounds(int rounds) { if(rounds <= 0) throw new ArithmeticException("Must train a positive number of learners"); this.rounds = rounds; } /** * Returns the number of rounds of boosting that will be done, which is also the number of base learners that will be trained * @return the number of rounds of boosting that will be done, which is also the number of base learners that will be trained */ public int getRounds() { return rounds; } /** * Bagging produces multiple base learners. These can all be trained at the same time, using more memory, * or sequentially using the base learner's parallel training method. If set to true, the base learners * will be trained simultaneously. 
* * @param simultaniousTraining true to train all learners at the same time, false to train them sequentially */ public void setSimultaniousTraining(boolean simultaniousTraining) { this.simultaniousTraining = simultaniousTraining; } @Override public CategoricalResults classify(DataPoint data) { if(baseClassifier == null) throw new RuntimeException("Bagging instance created for regression, not classification"); else if(learners == null || learners.isEmpty()) throw new RuntimeException("Classifier has not yet been trained"); CategoricalResults totalResult = new CategoricalResults(predicting.getNumOfCategories()); for(int i = 0; i < learners.size(); i++) { CategoricalResults result = ((Classifier) learners.get(i)).classify(data); totalResult.incProb(result.mostLikely(), 1.0); } totalResult.normalize(); return totalResult; } @Override public void train(final ClassificationDataSet dataSet, final boolean parallel) { predicting = dataSet.getPredicting(); learners = new ArrayList(rounds); //Used to make the main thread wait for the working threads to finish before submiting a new job so we dont waist too much memory then we can use at once final Semaphore waitForThread = new Semaphore(SystemInfo.LogicalCores); //Used to make the main thread wait for the working threads to finish before returning final CountDownLatch waitForFinish = new CountDownLatch(rounds); //Creat a synchrnozied view so we can add safely final List synchronizedLearners = Collections.synchronizedList(learners); final int[] sampleCounts = new int[dataSet.size()]; ExecutorService threadPool = ParallelUtils.getNewExecutor(parallel); for(int i = 0; i < rounds; i++) { sampleWithReplacement(sampleCounts, sampleCounts.length+extraSamples, random); final ClassificationDataSet sampleSet = getSampledDataSet(dataSet, sampleCounts); final Classifier learner = baseClassifier.clone(); if(simultaniousTraining && parallel) { try { //Wait for an available thread waitForThread.acquire(); threadPool.submit(() -> { learner.train(sampleSet); synchronizedLearners.add(learner); waitForThread.release();//Finish, allow another one to pass through waitForFinish.countDown(); }); } catch (InterruptedException ex) { Logger.getLogger(Bagging.class.getName()).log(Level.SEVERE, null, ex); System.err.println(ex.getMessage()); } } else { learner.train(sampleSet, parallel); learners.add(learner); } } if (simultaniousTraining && parallel) try { waitForFinish.await(); } catch (InterruptedException ex) { Logger.getLogger(Bagging.class.getName()).log(Level.SEVERE, null, ex); System.err.println(ex.getMessage()); } threadPool.shutdownNow(); } /** * Creates a new data set from the given sample counts. Points sampled * multiple times will have multiple entries in the data set. * @param dataSet the data set that was sampled from * @param sampledCounts the sampling values obtained from * {@link #sampleWithReplacement(int[], int, java.util.Random) } * @return a new sampled classification data set */ public static ClassificationDataSet getSampledDataSet(ClassificationDataSet dataSet, int[] sampledCounts) { ClassificationDataSet destination = new ClassificationDataSet(dataSet.getNumNumericalVars(), dataSet.getCategories(), dataSet.getPredicting()); for (int i = 0; i < sampledCounts.length; i++) for(int j = 0; j < sampledCounts[i]; j++) { DataPoint dp = dataSet.getDataPoint(i); destination.addDataPoint(dp.getNumericalValues(), dp.getCategoricalValues(), dataSet.getDataPointCategory(i)); } return destination; } /** * Creates a new data set from the given sample counts. 
Points sampled * multiple times will be added once to the data set with their weight * multiplied by the number of times it was sampled. * @param dataSet the data set that was sampled from * @param sampledCounts the sampling values obtained from * {@link #sampleWithReplacement(int[], int, java.util.Random) } * @return a new sampled classification data set */ public static ClassificationDataSet getWeightSampledDataSet(ClassificationDataSet dataSet, int[] sampledCounts) { ClassificationDataSet destination = new ClassificationDataSet(dataSet.getNumNumericalVars(), dataSet.getCategories(), dataSet.getPredicting()); for (int i = 0; i < sampledCounts.length; i++) { if(sampledCounts[i] <= 0) continue; DataPoint dp = dataSet.getDataPoint(i); destination.addDataPoint(dp, dataSet.getDataPointCategory(i), dataSet.getWeight(i)*sampledCounts[i]); } return destination; } /** * Creates a new data set from the given sample counts. Points sampled * multiple times will have multiple entries in the data set. * @param dataSet the data set that was sampled from * @param sampledCounts the sampling values obtained from * {@link #sampleWithReplacement(int[], int, java.util.Random) } * @return a new sampled classification data set */ public static RegressionDataSet getSampledDataSet(RegressionDataSet dataSet, int[] sampledCounts) { RegressionDataSet destination = new RegressionDataSet(dataSet.getNumNumericalVars(), dataSet.getCategories()); for (int i = 0; i < sampledCounts.length; i++) for (int j = 0; j < sampledCounts[i]; j++) { DataPoint dp = dataSet.getDataPoint(i); destination.addDataPoint(dp, dataSet.getTargetValue(i)); } return destination; } /** * Creates a new data set from the given sample counts. Points sampled * multiple times will be added once to the data set with their weight * multiplied by the number of times it was sampled. * @param dataSet the data set that was sampled from * @param sampledCounts the sampling values obtained from * {@link #sampleWithReplacement(int[], int, java.util.Random) } * @return a new sampled classification data set */ public static RegressionDataSet getWeightSampledDataSet(RegressionDataSet dataSet, int[] sampledCounts) { RegressionDataSet destination = new RegressionDataSet(dataSet.getNumNumericalVars(), dataSet.getCategories()); for (int i = 0; i < sampledCounts.length; i++) { if(sampledCounts[i] <= 0) continue; DataPoint dp = dataSet.getDataPoint(i); destination.addDataPoint(dp, dataSet.getTargetValue(i), dataSet.getWeight(i)*sampledCounts[i]); } return destination; } /** * Performs the sampling based on the number of data points, storing the * counts in an array to be constructed from XXXX * @param sampleCounts an array to keep count of how many times each data * point was sampled. 
The array will be filled with zeros before sampling * starts * @param samples the number of samples to take from the data set * @param rand the source of randomness */ static public void sampleWithReplacement(int[] sampleCounts, int samples, Random rand) { Arrays.fill(sampleCounts, 0); for(int i = 0; i < samples; i++) sampleCounts[rand.nextInt(sampleCounts.length)]++; } @Override public boolean supportsWeightedData() { return false; } @Override public double regress(DataPoint data) { if(baseRegressor == null) throw new RuntimeException("Bagging instance created for classification, not regression"); else if(learners == null || learners.isEmpty()) throw new RuntimeException("Regressor has not yet been trained"); OnLineStatistics stats = new OnLineStatistics(); for(int i = 0; i < learners.size(); i++) { double x = ((Regressor) learners.get(i)).regress(data); stats.add(x); } return stats.getMean(); } @Override public void train(RegressionDataSet dataSet, final boolean parallel) { learners = new ArrayList(rounds); //Used to make the main thread wait for the working threads to finish before submiting a new job so we dont waist too much memory then we can use at once final Semaphore waitForThread = new Semaphore(SystemInfo.LogicalCores); //Used to make the main thread wait for the working threads to finish before returning final CountDownLatch waitForFinish = new CountDownLatch(rounds); //Creat a synchrnozied view so we can add safely final List synchronizedLearners = Collections.synchronizedList(learners); final int[] sampleCount = new int[dataSet.size()]; ExecutorService threadPool = ParallelUtils.getNewExecutor(parallel); for(int i = 0; i < rounds; i++) { sampleWithReplacement(sampleCount, sampleCount.length+extraSamples, random); final RegressionDataSet sampleSet = getSampledDataSet(dataSet, sampleCount); final Regressor learner = baseRegressor.clone(); if(simultaniousTraining && parallel) { try { //Wait for an available thread waitForThread.acquire(); threadPool.submit(() -> { learner.train(sampleSet); synchronizedLearners.add(learner); waitForThread.release();//Finish, allow another one to pass through waitForFinish.countDown(); }); } catch (InterruptedException ex) { Logger.getLogger(Bagging.class.getName()).log(Level.SEVERE, null, ex); System.err.println(ex.getMessage()); } } else { learner.train(sampleSet, parallel); learners.add(learner); } } if (simultaniousTraining && parallel) try { waitForFinish.await(); } catch (InterruptedException ex) { Logger.getLogger(Bagging.class.getName()).log(Level.SEVERE, null, ex); System.err.println(ex.getMessage()); } threadPool.shutdownNow(); } @Override public Bagging clone() { Bagging clone = new Bagging(extraSamples, simultaniousTraining, rounds, new Random(rounds)); if(baseClassifier != null) clone.baseClassifier = baseClassifier.clone(); if(predicting != null) clone.predicting = this.predicting.clone(); if(baseRegressor != null) clone.baseRegressor = baseRegressor.clone(); if(learners != null && !learners.isEmpty()) { clone.learners = new ArrayList(this.learners.size()); for(Object learner : learners) if(learner instanceof Classifier) clone.learners.add( ((Classifier)learner).clone()); else clone.learners.add( ((Regressor)learner).clone()); } return clone; } }
19,898
40.370062
161
java
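Two small sketches for the Bagging class above: one of the static bootstrap-sampling helper, which is fully self-contained, and one of bagging decision trees over an assumed pre-loaded training set. The round count, tree depth, and random seed are arbitrary.

import java.util.Arrays;
import java.util.Random;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.boosting.Bagging;
import jsat.classifiers.trees.DecisionTree;

public class BaggingSketch
{
    public static void main(String[] args)
    {
        // The static helper draws a bootstrap sample: counts[i] is how many
        // times point i was drawn, and the entries sum to the sample size.
        int[] counts = new int[10];
        Bagging.sampleWithReplacement(counts, 10, new Random(42));
        System.out.println(Arrays.toString(counts));
    }

    // 'data' is an assumed pre-loaded training set
    public static Bagging fit(ClassificationDataSet data)
    {
        Bagging bagger = new Bagging(new DecisionTree(8)); // defaults to 20 rounds
        bagger.setRounds(50);
        bagger.train(data, true); // trains the base learners in parallel
        return bagger;
    }
}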
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/EmphasisBoost.java
package jsat.classifiers.boosting; import java.util.ArrayList; import java.util.Collections; import java.util.List; import jsat.DataSet; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.classifiers.trees.DecisionTree; import jsat.distributions.Distribution; import jsat.distributions.Uniform; import jsat.parameters.Parameter.ParameterHolder; import jsat.parameters.Parameterized; import jsat.utils.DoubleList; import jsat.utils.FakeExecutor; /** * Emphasis Boost is a generalization of the Real AdaBoost algorithm, expanding * the update term and providing the {@link #setLambda(double) &lambda; } term * to control the trade off. With &lambda; = 1/2, it becomes equivalent to Real * AdaBoost. If the weak learner does not support confidence outputs (non-hard * decisions), then it further becomes equivalent to Discrete Ada Boost. <br> * Emphasis Boost only supports binary classification problems, the learner used * should support weighted predictions. * <br><br> * NOTE: In the face of extreme outliers, it is possible for numerical * instability to occur. This implementation attempts to reset weights when * numerical issues occur. * <br><br> * See: <br> * Gómez-Verdejo, V., Ortega-Moral, M., Arenas-García, J.,&amp;Figueiras-Vidal, * A. R. (2006). <i>Boosting by weighting critical and erroneous samples</i>. * Neurocomputing, 69(7-9), 679–685. doi:10.1016/j.neucom.2005.12.011 * * @author Edward Raff */ public class EmphasisBoost implements Classifier, Parameterized, BinaryScoreClassifier { private static final long serialVersionUID = -6372897830449685891L; @ParameterHolder private Classifier weakLearner; private int maxIterations; /** * The list of weak hypothesis */ protected List<Classifier> hypoths; /** * The weights for each weak learner */ protected List<Double> hypWeights; protected CategoricalData predicting; private double lambda; /** * Creates a new EmphasisBooster with shallow decision trees and &lambda; = 0.35 */ public EmphasisBoost() { this(new DecisionTree(6), 200, 0.35); } /** * Creates a new EmphasisBoost learner * @param weakLearner the weak learner to use * @param maxIterations the maximum number of boosting iterations * @param lambda the trade off parameter in [0, 1] */ public EmphasisBoost(Classifier weakLearner, int maxIterations, double lambda) { setWeakLearner(weakLearner); setMaxIterations(maxIterations); setLambda(lambda); } /** * Copy constructor * @param toClone the object to clone */ protected EmphasisBoost(EmphasisBoost toClone) { this(toClone.weakLearner.clone(), toClone.maxIterations, toClone.lambda); if(toClone.hypWeights != null) { this.hypWeights = new DoubleList(toClone.hypWeights); this.hypoths = new ArrayList<Classifier>(toClone.maxIterations); for(Classifier weak : toClone.hypoths) this.hypoths.add(weak.clone()); this.predicting = toClone.predicting.clone(); } } /** * * @return a list of the models that are in this ensemble. */ public List<Classifier> getModels() { return Collections.unmodifiableList(hypoths); } /** * * @return a list of the models weights that are in this ensemble. 
*/ public List<Double> getModelWeights() { return Collections.unmodifiableList(hypWeights); } /** * Returns the maximum number of iterations used * @return the maximum number of iterations used */ public int getMaxIterations() { return maxIterations; } /** * Sets the maximal number of boosting iterations that may be performed * @param maxIterations the maximum number of iterations */ public void setMaxIterations(int maxIterations) { if(maxIterations < 1) throw new IllegalArgumentException("Iterations must be positive, not " + maxIterations); this.maxIterations = maxIterations; } /** * Returns the weak learner currently being used by this method. * @return the weak learner currently being used by this method. */ public Classifier getWeakLearner() { return weakLearner; } /** * Sets the weak learner used during training. * @param weakLearner the weak learner to use */ public void setWeakLearner(Classifier weakLearner) { if(!weakLearner.supportsWeightedData()) throw new IllegalArgumentException("WeakLearner must support weighted data to be boosted"); this.weakLearner = weakLearner; } /** * Guesses the distribution to use for the &lambda; parameter * * @param d the dataset to get the guess for * @return the guess for the &lambda; parameter * @see #setLambda(double) */ public static Distribution guessLambda(DataSet d) { return new Uniform(0.25, 0.45); } /** * &lambda; controls the trade off between weighting the errors based on * their distance to the margin and the quadratic error of the output. The * three extreme values are: <br> * <ul> * <li> &lambda; = 0 , in this case all the weight is placed on points based * on their distance to the margin of the classification boundary. </li> * <li>&lambda; = 1/2, in this case weight is balanced between the margin * distance and the quadratic error. This is equivalent to Real Ada Boost * </li> * <li>&lambda; = 1, in this case the weight is placed purely based on the * quadratic error of the output</li> * </ul> * <br><br> According to the original paper, values in the range [0.3, 0.4] * often perform well. 
* * @param lambda the trade off parameter in [0, 1] */ public void setLambda(double lambda) { this.lambda = lambda; } /** * Returns the value of the &lambda; trade off parameter * @return the value of the &lambda; trade off parameter */ public double getLambda() { return lambda; } @Override public double getScore(DataPoint dp) { double score = 0; for(int i = 0; i < hypoths.size(); i++) score += H(hypoths.get(i), dp)*hypWeights.get(i); return score; } @Override public CategoricalResults classify(DataPoint data) { if(predicting == null) throw new RuntimeException("Classifier has not been trained yet"); CategoricalResults cr = new CategoricalResults(predicting.getNumOfCategories()); double score = getScore(data); if(score < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } private double H(Classifier weak, DataPoint dp ) { CategoricalResults catResult = weak.classify(dp); return catResult.getProb(1)*2-1; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { predicting = dataSet.getPredicting(); hypWeights = new DoubleList(maxIterations); hypoths = new ArrayList<>(maxIterations); final int N = dataSet.size(); ClassificationDataSet cds = dataSet.shallowClone(); //Initialization step, set up the weights so they are all 1 / size of dataset for(int i = 0; i < cds.size(); i++) cds.setWeight(i, 1.0/N);//Scaled, they are all 1 double weightSum = 1; //Keep track of the cumaltive score for everything double[] H_cur = new double[N]; double[] curH_Result = new double[N]; for(int t = 0; t < maxIterations; t++) { Classifier weak = weakLearner.clone(); weak.train(cds, parallel); double error = 0.0; for(int i = 0; i < cds.size(); i++) { DataPoint dp = cds.getDataPoint(i); double y_hat = H_cur[i] = H(weak, dp); double y_true = cds.getDataPointCategory(i)*2-1;//{-1 or 1} error += cds.getWeight(i)*y_hat*y_true; } if(error < 0) return; double alpha_m = Math.log((1+error)/(1-error))/2; weightSum = 0; for(int i = 0; i < cds.size(); i++) { curH_Result[i] += alpha_m * H_cur[i]; double f_t = curH_Result[i]; DataPoint dp = cds.getDataPoint(i); double y_true = cds.getDataPointCategory(i)*2-1; double w_i = Math.exp(lambda*Math.pow(f_t-y_true, 2) - (1-lambda)*f_t*f_t); if(Double.isInfinite(w_i)) w_i = 50;//Let it grow back isntead of bizaro huge values weightSum += w_i; cds.setWeight(i, w_i); } for(int i = 0; i < cds.size(); i++) cds.setWeight(i, cds.getWeight(i)/weightSum); hypoths.add(weak); hypWeights.add(alpha_m); } } @Override public boolean supportsWeightedData() { return false; } @Override public EmphasisBoost clone() { return new EmphasisBoost(this); } }
9,641
30.821782
103
java
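A sketch of EmphasisBoost on an assumed pre-loaded two-class data set, using the same weak learner and lambda = 0.35 that the class's own no-argument constructor uses.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.boosting.EmphasisBoost;
import jsat.classifiers.trees.DecisionTree;

public class EmphasisBoostSketch
{
    // 'data' is an assumed pre-loaded two-class training set
    public static double scoreFirstPoint(ClassificationDataSet data)
    {
        // lambda = 0.5 reduces to Real AdaBoost; 0.35 follows the class's own default
        EmphasisBoost boost = new EmphasisBoost(new DecisionTree(6), 200, 0.35);
        boost.train(data, false);
        DataPoint dp = data.getDataPoint(0);
        return boost.getScore(dp); // signed score: negative favors class 0, positive favors class 1
    }
}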
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/LogitBoost.java
package jsat.classifiers.boosting; import java.util.ArrayList; import java.util.Collections; import java.util.List; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.DataPointPair; import jsat.classifiers.OneVSAll; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.parameters.Parameterized; import jsat.regression.MultipleLinearRegression; import jsat.regression.RegressionDataSet; import jsat.regression.Regressor; /** * An implementation of the original 2 class LogitBoost algorithm. While there is a * multi-class description in the original paper, its implementation is congruent * with the result of using LogitBoost with {@link OneVSAll} classifier. <br> * LogitBoost differs from its predecessors in that it boosts * {@link Regressor regression} models to create a powerful classifier. * <br><br> * Paper: <b>Special Invited Paper Additive Logistic Regression: A Statistical * View of Boosting</b>, By Jerome Friedman, Trevor Hastie and Robert Tibshirani. * <i>The Annals of Statistics</i> 2000, Vol. 28, No. 2, 337–407 * * @author Edward Raff */ public class LogitBoost implements Classifier, Parameterized { private static final long serialVersionUID = 1621062168467402062L; /** * The constant factor that the sum of regressors is scaled by. */ protected double fScaleConstant = 0.5; /** * Weak learners */ protected List<Regressor> baseLearners; /** * Weak learner to use, 'the oracle' */ protected Regressor baseLearner; private int maxIterations; /** * Constant for stability and controls the maximum penalty */ private double zMax = 3; /** * Creates a new LogitBoost using the standard {@link MultipleLinearRegression} . * @param M the maximum number of iterations. */ public LogitBoost(int M) { this(new MultipleLinearRegression(true), M); } /** * Creates a new LogitBoost using the given base learner. * @param baseLearner the weak learner to build an ensemble out of. * @param M the maximum number of iterations. */ public LogitBoost(Regressor baseLearner, int M) { if(!baseLearner.supportsWeightedData()) throw new RuntimeException("Base Learner must support weighted data points to be boosted"); this.baseLearner = baseLearner; this.maxIterations = M; } /** * * @return a list of the models that are in this ensemble. */ public List<Regressor> getModels() { return Collections.unmodifiableList(baseLearners); } /** * Sets the maximum number of iterations of boosting that can occur, giving * the maximum number of base learners that may be trained * @param maxIterations the maximum number of iterations */ public void setMaxIterations(int maxIterations) { this.maxIterations = maxIterations; } /** * The maximum number of iterations of boosting that may occur. * @return maximum number of iterations of boosting that may occur. */ public int getMaxIterations() { return maxIterations; } /** * Sets the penalty bound for miss-classification of results. This also provides * numerical stability to the algorithm. The results are not sensitive to this * value. 
The recommended value range is in [2, 4] * * @param zMax the penalty bound * @throws ArithmeticException if the value is not in (0, {@link Double#MAX_VALUE}] */ public void setzMax(double zMax) { if(Double.isInfinite(zMax) || Double.isNaN(zMax) || zMax <= 0) throw new ArithmeticException("Invalid penalty given: " + zMax); this.zMax = zMax; } /** * Returns the maximum miss-classification penalty used by the algorithm. * @return the maximum miss-classification */ public double getzMax() { return zMax; } @Override public CategoricalResults classify(DataPoint data) { if(baseLearner == null) throw new UntrainedModelException("Model has not yet been trained"); double p = P(data); CategoricalResults cr = new CategoricalResults(2); cr.setProb(1, p); cr.setProb(0, 1.0-p); return cr; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { if(dataSet.getClassSize() != 2) throw new FailedToFitException("LogitBoost only supports binary decision tasks, not " + dataSet.getClassSize() + " class problems"); /** * The data points paired with what we will use to store the target regression values. */ RegressionDataSet rds = new RegressionDataSet(dataSet.getAsFloatDPPList()); baseLearners = new ArrayList<>(maxIterations); int N = dataSet.size(); for(int m = 0; m < maxIterations; m++) { for(int i = 0; i < N; i++) { DataPoint dp = rds.getDataPoint(i); double pi = P(dp); double zi; if(dataSet.getDataPointCategory(i) == 1) zi = Math.min(zMax, 1.0/pi); else zi = Math.max(-zMax, -1.0/(1.0-pi)); double wi = Math.max(pi*(1-pi), 2*1e-15); rds.setWeight(i, wi); rds.setTargetValue(i, zi); } Regressor f = baseLearner.clone(); f.train(rds); baseLearners.add(f); } } private double F(DataPoint x) { double fx = 0.0;//0 so when we are uninitalized P will return 0.5 for(Regressor fm : baseLearners) fx += fm.regress(x); return fx*fScaleConstant; } /** * Returns the probability that a given data point belongs to class 1 * @param x the data point in question * @return P(y = 1 | x) */ protected double P(DataPoint x) { /** * F(x) * e * p(x) = --------------- * F(x) - F(x) * e + e */ double fx = F(x); double efx = Math.exp(fx); double enfx = Math.exp(-fx); if(Double.isInfinite(efx) && efx > 0 && enfx < 1e-15)//Well classified point could return a Infinity which turns into NaN return 1.0; return efx/(efx + enfx); } @Override public boolean supportsWeightedData() { return false; } @Override public LogitBoost clone() { LogitBoost clone = new LogitBoost(maxIterations); clone.zMax = this.zMax; if(this.baseLearner != null) clone.baseLearner = this.baseLearner.clone(); if(this.baseLearners != null) { clone.baseLearners = new ArrayList<>(this.baseLearners.size()); for(Regressor r : baseLearners) clone.baseLearners.add(r.clone()); } return clone; } }
7,327
30.316239
144
java
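A minimal usage sketch for the LogitBoost class above. The constructor, setzMax, train, and classify calls come straight from the class as shown; the training set is assumed to be an already constructed, binary-label ClassificationDataSet supplied by the caller, and the wrapper class is purely illustrative.

import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.boosting.LogitBoost;

//hypothetical helper class, not part of JSAT
public class LogitBoostExample
{
    /**
     * Trains a LogitBoost ensemble and reports P(y = 1 | x) for one point.
     * The data set is assumed to have exactly 2 classes.
     */
    public static double demo(ClassificationDataSet train, DataPoint query)
    {
        //40 boosting rounds with the default MultipleLinearRegression base learner
        LogitBoost booster = new LogitBoost(40);
        booster.setzMax(3);//penalty bound, recommended range is [2, 4]
        booster.train(train);
        CategoricalResults cr = booster.classify(query);
        return cr.getProb(1);
    }
}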
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/LogitBoostPL.java
package jsat.classifiers.boosting;

import java.util.List;
import java.util.ArrayList;
import jsat.classifiers.ClassificationDataSet;
import jsat.regression.Regressor;
import static jsat.utils.SystemInfo.*;
import jsat.utils.concurrent.ParallelUtils;

/**
 * An extension to the original LogitBoost algorithm for parallel training.
 * This comes at an increase in classification time.
 * <br>
 * Note: LogitBoost is a somewhat unstable algorithm, and this method does in fact increase the instability.
 * In most cases, similar classification results will be obtained, however - under some circumstances
 * the performance may be significantly degraded, especially if there is insufficient data to distribute
 * for parallel computation. The results for LogitBoost seem to be overstated in the original paper.
 * <br>
 * See: <i>Scalable and Parallel Boosting with MapReduce</i>, Indranil Palit and Chandan K. Reddy, IEEE Transactions on Knowledge and Data Engineering
 * @author Edward Raff
 */
public class LogitBoostPL extends LogitBoost
{
    private static final long serialVersionUID = -7932049860430324903L;

    public LogitBoostPL(Regressor baseLearner, int M)
    {
        super(baseLearner, M);
    }

    public LogitBoostPL(int M)
    {
        super(M);
    }

    @Override
    public void train(ClassificationDataSet dataSet, boolean parallel)
    {
        /*
         * Implementation Note:
         * In the original paper, the weak hypotheses of the LogitBoost workers are
         * sorted by their unweighted accuracy, and each merged regressor is the
         * average of the regressors at the same sorted group index. With M workers,
         * each merged regressor is the sum of its parts divided by M, i.e. the
         * average. Applying this to the whole data set, we get the same result by
         * adding all regressors to the ensemble, giving M*Iteration hypotheses with
         * the sum scaled by 1/M instead of Iteration hypotheses scaled by 1/2.
         *
         * Applying this, we can simplify the implementation and avoid M sortings.
         */
        List<ClassificationDataSet> subSets = dataSet.cvSet(LogicalCores);
        this.baseLearners = new ArrayList<>(LogicalCores * getMaxIterations());

        ParallelUtils.streamP(subSets.stream(), parallel).forEach((subSet)->
        {
            LogitBoost boost = new LogitBoost(baseLearner.clone(), getMaxIterations());
            boost.train(subSet);
            for(Regressor r : boost.baseLearners)
                baseLearners.add(r);
        });

        this.fScaleConstant = 1.0;
        if(parallel)
            this.fScaleConstant /= LogicalCores;
    }

    @Override
    public LogitBoostPL clone()
    {
        LogitBoostPL clone = new LogitBoostPL(getMaxIterations());
        clone.setzMax(getzMax());

        if(this.baseLearner != null)
            clone.baseLearner = this.baseLearner.clone();
        if(this.baseLearners != null)
        {
            clone.baseLearners = new ArrayList<>(this.baseLearners.size());
            for(Regressor r : baseLearners)
                clone.baseLearners.add(r.clone());
        }

        return clone;
    }
}
3,250
33.585106
150
java
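A short sketch for the parallel variant above. The API is identical to LogitBoost; the only difference exercised here is passing parallel = true so the per-fold workers are trained concurrently. The data set is again assumed to exist already, and the helper class is illustrative only.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.boosting.LogitBoostPL;

//hypothetical helper class, not part of JSAT
public class LogitBoostPLExample
{
    /**
     * Same contract as LogitBoost, but the ensemble is assembled from workers
     * trained on disjoint folds of the data. Passing parallel = true is what
     * spreads the fold training across the available cores.
     */
    public static LogitBoostPL demo(ClassificationDataSet train)
    {
        LogitBoostPL booster = new LogitBoostPL(25);
        booster.train(train, true);//train the per-fold workers in parallel
        return booster;
    }
}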
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/ModestAdaBoost.java
package jsat.classifiers.boosting; import java.util.*; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.parameters.Parameterized; import jsat.utils.DoubleList; /** * Modest Ada Boost is a generalization of Discrete Ada Boost that attempts to * reduce the generalization error and avoid over-fitting. Empirically, * ModestBoost usually maintains a higher training-set error, and may take more * iterations to obtain the same test set error as other algorithms, but doesn't * not increase as much after it reaches the minimum error - which should make * it easier to obtain the higher accuracy. * <br> * See: <br> * Vezhnevets, A.,&amp;Vezhnevets, V. (2005). <i>“Modest AdaBoost” – Teaching * AdaBoost to Generalize Better</i>. GraphiCon. Novosibirsk Akademgorodok, * Russia. Retrieved from * <a href="http://www.inf.ethz.ch/personal/vezhneva/Pubs/ModestAdaBoost.pdf"> * here</a> * * @author Edward Raff */ public class ModestAdaBoost implements Classifier, Parameterized, BinaryScoreClassifier { private static final long serialVersionUID = 8223388561185098909L; private Classifier weakLearner; private int maxIterations; /** * The list of weak hypothesis */ protected List<Classifier> hypoths; /** * The weights for each weak learner */ protected List<Double> hypWeights; protected CategoricalData predicting; /** * Creates a new ModestBoost learner * @param weakLearner the weak learner to use * @param maxIterations the maximum number of boosting iterations */ public ModestAdaBoost(Classifier weakLearner, int maxIterations) { setWeakLearner(weakLearner); setMaxIterations(maxIterations); } /** * Copy constructor * @param toClone the object to clone */ protected ModestAdaBoost(ModestAdaBoost toClone) { this(toClone.weakLearner.clone(), toClone.maxIterations); if(toClone.hypWeights != null) { this.hypWeights = new DoubleList(toClone.hypWeights); this.hypoths = new ArrayList<Classifier>(toClone.maxIterations); for(Classifier weak : toClone.hypoths) this.hypoths.add(weak.clone()); this.predicting = toClone.predicting.clone(); } } /** * * @return a list of the models that are in this ensemble. */ public List<Classifier> getModels() { return Collections.unmodifiableList(hypoths); } /** * * @return a list of the models weights that are in this ensemble. */ public List<Double> getModelWeights() { return Collections.unmodifiableList(hypWeights); } /** * Returns the maximum number of iterations used * @return the maximum number of iterations used */ public int getMaxIterations() { return maxIterations; } /** * Sets the maximal number of boosting iterations that may be performed * @param maxIterations the maximum number of iterations */ public void setMaxIterations(int maxIterations) { if(maxIterations < 1) throw new IllegalArgumentException("Iterations must be positive, not " + maxIterations); this.maxIterations = maxIterations; } /** * Returns the weak learner currently being used by this method. * @return the weak learner currently being used by this method. */ public Classifier getWeakLearner() { return weakLearner; } /** * Sets the weak learner used during training. 
* @param weakLearner the weak learner to use */ public void setWeakLearner(Classifier weakLearner) { if(!weakLearner.supportsWeightedData()) throw new IllegalArgumentException("WeakLearner must support weighted data to be boosted"); this.weakLearner = weakLearner; } @Override public double getScore(DataPoint dp) { double score = 0; for(int i = 0; i < hypoths.size(); i++) score += (hypoths.get(i).classify(dp).getProb(1)*2-1)*hypWeights.get(i); return score; } @Override public CategoricalResults classify(DataPoint data) { if(predicting == null) throw new RuntimeException("Classifier has not been trained yet"); CategoricalResults cr = new CategoricalResults(predicting.getNumOfCategories()); double score = getScore(data); if(score < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { predicting = dataSet.getPredicting(); hypWeights = new DoubleList(maxIterations); hypoths = new ArrayList<Classifier>(maxIterations); final int N = dataSet.size(); double[] D_inv = new double[N]; double[] D = new double[N]; ClassificationDataSet cds = dataSet.shallowClone(); Arrays.fill(D, 1.0/N); for(int i = 0; i < N; i++) cds.setWeight(i, D[0]);//Scaled, they are all 1 double weightSum = 1; double[] H_cur = new double[N]; for(int t = 0; t < maxIterations; t++) { Classifier weak = weakLearner.clone(); weak.train(cds, parallel); double invSum = 0; for(int i = 0; i < N; i++) invSum += (D_inv[i] = 1-D[i]); for(int i = 0; i < N; i++) D_inv[i] /= invSum; double p_d = 0, p_id = 0, n_d = 0, n_id = 0; for(int i = 0; i < N; i++) { H_cur[i] = (weak.classify(cds.getDataPoint(i)).getProb(1)*2-1); double outPut = Math.signum(H_cur[i]); int c = cds.getDataPointCategory(i); if(c == 1)//positive example case { p_d += outPut * D[i]; p_id += outPut * D_inv[i]; } else { n_d += outPut * D[i]; n_id += outPut * D_inv[i]; } } double alpha_m = p_d * (1 - p_id) - n_d * (1 - n_id); if(Math.signum(alpha_m) != Math.signum(p_d-n_d) || Math.abs((p_d - n_d)) < 1e-6 || alpha_m <= 0) return; weightSum = 0; for(int i = 0; i < N; i++) { double w_i = cds.getWeight(i); int y_i = cds.getDataPointCategory(i)*2-1; w_i *= Math.exp(-y_i*alpha_m*H_cur[i]); if(Double.isInfinite(w_i)) w_i = 1;//Let it grow back else if(w_i <= 0) w_i = 1e-3/N;//Dont let it go quit to zero weightSum += w_i; cds.setWeight(i, w_i); } for(int i = 0; i < N; i++) cds.setWeight(i, Math.max(cds.getWeight(i)/weightSum, 1e-10)); hypWeights.add(alpha_m); hypoths.add(weak); } } @Override public boolean supportsWeightedData() { return false; } @Override public ModestAdaBoost clone() { return new ModestAdaBoost(this); } }
7,535
30.140496
108
java
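A usage sketch for ModestAdaBoost as shown above. DecisionStump is assumed to be available elsewhere in this library as a weak learner that supports weighted data; any Classifier whose supportsWeightedData() returns true can be substituted. The getScore call illustrates the BinaryScoreClassifier contract, where the sign of the score selects the class.

import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.boosting.ModestAdaBoost;
import jsat.classifiers.trees.DecisionStump;

//hypothetical helper class, not part of JSAT
public class ModestAdaBoostExample
{
    public static void demo(ClassificationDataSet train, DataPoint query)
    {
        //DecisionStump is assumed as a weighted-data weak learner; any such
        //Classifier can be boosted in its place
        ModestAdaBoost booster = new ModestAdaBoost(new DecisionStump(), 200);
        booster.train(train);
        double score = booster.getScore(query);//sign of the score gives the class
        CategoricalResults vote = booster.classify(query);
        System.out.println("score=" + score + " predicted class=" + vote.mostLikely());
    }
}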
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/SAMME.java
package jsat.classifiers.boosting; import java.util.ArrayList; import java.util.Collections; import java.util.List; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.parameters.Parameterized; import jsat.utils.DoubleList; /** * This is an implementation of the Multi-Class AdaBoost method SAMME (Stagewise Additive Modeling using * a Multi-Class Exponential loss function), presented in <i>Multi-class AdaBoost</i> by Ji Zhu, * Saharon Rosset, Hui Zou,&amp;Trevor Hasstie <br> * <br> * This algorithm reduces to {@link AdaBoostM1 } for binary classification problems. Its often performs * better for <i>k</i> class classification problems, and has a weaker requirement of besting 1/<i>k</i> * accuracy for any k instead of 1/2. * * @author Edward Raff */ public class SAMME implements Classifier, Parameterized { private static final long serialVersionUID = -3584203799253810599L; private Classifier weakLearner; private int maxIterations; /** * The list of weak hypothesis */ private List<Classifier> hypoths; /** * The weights for each weak learner */ private List<Double> hypWeights; private CategoricalData predicting; public SAMME(Classifier weakLearner, int maxIterations) { if(!weakLearner.supportsWeightedData()) throw new RuntimeException("WeakLearner must support weighted data to be boosted"); this.weakLearner = weakLearner; this.maxIterations = maxIterations; } @Override public CategoricalResults classify(DataPoint data) { if(predicting == null) throw new RuntimeException("Classifier has not been trained yet"); CategoricalResults cr = new CategoricalResults(predicting.getNumOfCategories()); for(int i=0; i < hypoths.size(); i++) cr.incProb(hypoths.get(i).classify(data).mostLikely(), hypWeights.get(i)); cr.normalize(); return cr; } /** * * @return a list of the models that are in this ensemble. */ public List<Classifier> getModels() { return Collections.unmodifiableList(hypoths); } /** * * @return a list of the models weights that are in this ensemble. 
*/ public List<Double> getModelWeights() { return Collections.unmodifiableList(hypWeights); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { predicting = dataSet.getPredicting(); hypWeights = new DoubleList(maxIterations); hypoths = new ArrayList<>(); /** * The number of classes we are predicting */ int K = predicting.getNumOfCategories(); double logK = Math.log(K-1.0)/Math.log(2); ClassificationDataSet cds = dataSet.shallowClone(); //Initialization step, set up the weights so they are all 1 / size of dataset for(int i = 0; i < cds.size(); i++) cds.setWeight(i, 1.0);//Scaled, they are all 1 double sumOfWeights = cds.size(); //Rather then reclasify points, we just save this list boolean[] wasCorrect = new boolean[cds.size()]; for(int t = 0; t < maxIterations; t++) { weakLearner.train(cds, parallel); //Error is the same as in AdaBoost.M1 double error = 0.0; for(int i = 0; i < cds.size(); i++) if( !(wasCorrect[i] = weakLearner.classify(cds.getDataPoint(i)).mostLikely() == cds.getDataPointCategory(i)) ) error += cds.getWeight(i); error /= sumOfWeights; if(error >= (1.0-1.0/K) || error == 0.0)///Diference, we only need to be better then random guessing classes return; //The main difference - a different error term double am = Math.log((1.0-error)/error)/Math.log(2) +logK; //Update Distribution weights for(int i = 0; i < wasCorrect.length; i++) { if(!wasCorrect[i]) { double w = cds.getWeight(i); double newW = w*Math.exp(am); if(Double.isInfinite(newW))//weight explosoin! Force it back down newW = 1.0; sumOfWeights += (newW-w); cds.setWeight(i, newW); } } hypoths.add(weakLearner.clone()); hypWeights.add(am); } } @Override public boolean supportsWeightedData() { return false; } @Override public SAMME clone() { SAMME clone = new SAMME(weakLearner.clone(), maxIterations); if(this.hypWeights != null) clone.hypWeights = new DoubleList(this.hypWeights); if(this.hypoths != null) { clone.hypoths = new ArrayList<>(this.hypoths.size()); for(int i = 0; i < this.hypoths.size(); i++) clone.hypoths.add(this.hypoths.get(i).clone()); } if(this.predicting != null) clone.predicting = this.predicting.clone(); return clone; } }
5,382
32.02454
126
java
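A sketch of boosting an arbitrary weak learner with SAMME on a k-class problem. The weak learner is supplied by the caller, must support weighted data, and only needs to beat 1/k accuracy for the boosting loop to keep making progress. The wrapper class is illustrative only.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.DataPoint;
import jsat.classifiers.boosting.SAMME;

//hypothetical helper class, not part of JSAT
public class SAMMEExample
{
    /**
     * Boosts any weighted-data weak learner on a k-class data set and returns
     * the weighted-vote prediction for a single query point.
     */
    public static int demo(ClassificationDataSet train, Classifier weak, DataPoint query)
    {
        SAMME samme = new SAMME(weak, 100);//weak must support weighted data
        samme.train(train);
        return samme.classify(query).mostLikely();
    }
}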
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/Stacking.java
package jsat.classifiers.boosting; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import jsat.classifiers.*; import jsat.classifiers.linear.LinearBatch; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.regression.RegressionDataSet; import jsat.regression.Regressor; /** * This provides an implementation of the Stacking ensemble method. Stacking * learns several base classifiers and a top level classifier learns to predict * the target based on the outputs of all the ensambled models. Historically a * linear model (such as {@link LinearBatch}) is used, which translates to * learning a weighted vote of the classifier outputs. However any classifier * may be used so long as it supports the desired target type. <br> * <br> * Note, that Stacking tends to work best when the base classifiers produce * reasonable probability estimates. <br> * Stacking supports {@link #supportsWeightedData() weighted data instances} if * the aggregating model does. * <br> * See: Wolpert, D. H. (1992). Stacked generalization. Neural Networks, 5, 241–259. * * @author Edward Raff */ public class Stacking implements Classifier, Regressor { private static final long serialVersionUID = -6173323872903232074L; private int folds; /** * The number of weights needed per model */ private int weightsPerModel; private Classifier aggregatingClassifier; private List<Classifier> baseClassifiers; private Regressor aggregatingRegressor; private List<Regressor> baseRegressors; public static final int DEFAULT_FOLDS = 3; /** * Creates a new Stacking classifier * @param folds the number of cross validation folds for learning the base model * @param aggregatingClassifier the classifier used to merge the results of all the input classifiers * @param baseClassifiers the list of base classifiers to ensemble */ public Stacking(int folds, Classifier aggregatingClassifier, List<Classifier> baseClassifiers) { if(baseClassifiers.size() < 2) throw new IllegalArgumentException("base classifiers must contain at least 2 elements, not " + baseClassifiers.size()); setFolds(folds); this.aggregatingClassifier = aggregatingClassifier; this.baseClassifiers = baseClassifiers; boolean allRegressors = aggregatingClassifier instanceof Regressor; for(Classifier cl : baseClassifiers) if(!(cl instanceof Regressor)) allRegressors = false; if(allRegressors) { aggregatingRegressor = (Regressor) aggregatingClassifier; baseRegressors = (List) baseClassifiers;//ugly type easure exploitation... } } /** * Creates a new Stacking classifier * @param folds the number of cross validation folds for learning the base model * @param aggregatingClassifier the classifier used to merge the results of all the input classifiers * @param baseClassifiers the array of base classifiers to ensemble */ public Stacking(int folds, Classifier aggregatingClassifier, Classifier... 
baseClassifiers) { this(folds, aggregatingClassifier, Arrays.asList(baseClassifiers)); } /** * Creates a new Stacking classifier that uses {@value #DEFAULT_FOLDS} folds of cross validation * @param aggregatingClassifier the classifier used to merge the results of all the input classifiers * @param baseClassifiers the list of base classifiers to ensemble */ public Stacking(Classifier aggregatingClassifier, List<Classifier> baseClassifiers) { this(DEFAULT_FOLDS, aggregatingClassifier, baseClassifiers); } /** * Creates a new Stacking classifier that uses {@value #DEFAULT_FOLDS} folds of cross validation * @param aggregatingClassifier the classifier used to merge the results of all the input classifiers * @param baseClassifiers the array of base classifiers to ensemble */ public Stacking(Classifier aggregatingClassifier, Classifier... baseClassifiers) { this(DEFAULT_FOLDS, aggregatingClassifier, baseClassifiers); } /** * Creates a new Stacking regressor * @param folds the number of cross validation folds for learning the base model * @param aggregatingRegressor the regressor used to merge the results of all the input classifiers * @param baseRegressors the list of base regressors to ensemble */ public Stacking(int folds, Regressor aggregatingRegressor, List<Regressor> baseRegressors) { setFolds(folds); this.aggregatingRegressor = aggregatingRegressor; this.baseRegressors = baseRegressors; boolean allClassifiers = aggregatingRegressor instanceof Classifier; for(Regressor reg : baseRegressors) if(!(reg instanceof Classifier)) allClassifiers = false; if(allClassifiers) { aggregatingClassifier = (Classifier) aggregatingRegressor; baseClassifiers = (List) baseRegressors;//ugly type easure exploitation... } } /** * Creates a new Stacking regressor * @param folds the number of cross validation folds for learning the base model * @param aggregatingRegressor the regressor used to merge the results of all the input classifiers * @param baseRegressors the array of base regressors to ensemble */ public Stacking(int folds, Regressor aggregatingRegressor, Regressor... baseRegressors) { this(folds, aggregatingRegressor, Arrays.asList(baseRegressors)); } /** * Creates a new Stacking regressor that uses {@value #DEFAULT_FOLDS} folds of cross validation * @param aggregatingRegressor the regressor used to merge the results of all the input classifiers * @param baseRegressors the list of base regressors to ensemble */ public Stacking(Regressor aggregatingRegressor, List<Regressor> baseRegressors) { this(DEFAULT_FOLDS, aggregatingRegressor, baseRegressors); } /** * Creates a new Stacking regressor that uses {@value #DEFAULT_FOLDS} folds of cross validation * @param aggregatingRegressor the regressor used to merge the results of all the input classifiers * @param baseRegressors the array of base regressors to ensemble */ public Stacking(Regressor aggregatingRegressor, Regressor... 
baseRegressors) { this(DEFAULT_FOLDS, aggregatingRegressor, baseRegressors); } /** * Copy constructor * @param toCopy the object to copy */ public Stacking(Stacking toCopy) { this.folds = toCopy.folds; this.weightsPerModel = toCopy.weightsPerModel; if(toCopy.aggregatingClassifier != null) { this.aggregatingClassifier = toCopy.aggregatingClassifier.clone(); this.baseClassifiers = new ArrayList<Classifier>(toCopy.baseClassifiers.size()); for(Classifier bc : toCopy.baseClassifiers) this.baseClassifiers.add(bc.clone()); if(toCopy.aggregatingRegressor == toCopy.aggregatingClassifier)//supports both { aggregatingRegressor = (Regressor) aggregatingClassifier; baseRegressors = (List) baseClassifiers;//ugly type easure exploitation... } } else//we are doing with regressors only { this.aggregatingRegressor = toCopy.aggregatingRegressor.clone(); this.baseRegressors = new ArrayList<Regressor>(toCopy.baseRegressors.size()); for(Regressor br : toCopy.baseRegressors) this.baseRegressors.add(br.clone()); } } /** * Sets the number of folds of cross validation to use when creating the new * set of weights that will be feed into the aggregating model. <br> * Note that the number of folds may be 1, and will run significantly * faster since models do not need to be re-trained. However it will be more * prone to overfitting. * @param folds the number of cross validation folds to use */ public void setFolds(int folds) { if(folds < 1) throw new IllegalArgumentException("Folds must be a positive integer, not " + folds); this.folds = folds; } /** * * @return the number of CV folds used for training */ public int getFolds() { return folds; } @Override public CategoricalResults classify(DataPoint data) { Vec w = new DenseVector(weightsPerModel*baseClassifiers.size()); if(weightsPerModel == 1) for(int i = 0; i < baseClassifiers.size(); i++) w.set(i, baseClassifiers.get(i).classify(data).getProb(0)*2-1); else { for(int i = 0; i < baseClassifiers.size(); i++) { CategoricalResults pred = baseClassifiers.get(i).classify(data); for(int j = 0; j < weightsPerModel; j++) w.set(i*weightsPerModel+j, pred.getProb(j)); } } return aggregatingClassifier.classify(new DataPoint(w)); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { final int models = baseClassifiers.size(); final int C = dataSet.getClassSize(); weightsPerModel = C == 2 ? 1 : C; ClassificationDataSet metaSet = new ClassificationDataSet(weightsPerModel*models, new CategoricalData[0], dataSet.getPredicting()); List<ClassificationDataSet> dataFolds = dataSet.cvSet(folds); //iterate in the order of the folds so we get the right dataum weights for (ClassificationDataSet cds : dataFolds) for (int i = 0; i < cds.size(); i++) { metaSet.addDataPoint(new DenseVector(weightsPerModel * models), cds.getDataPointCategory(i)); metaSet.setWeight(i, cds.getWeight(i)); } //create the meta training set for(int c = 0; c < baseClassifiers.size(); c++) { Classifier cl = baseClassifiers.get(c); int pos = 0; for(int f = 0; f < dataFolds.size(); f++) { ClassificationDataSet train = ClassificationDataSet.comineAllBut(dataFolds, f); ClassificationDataSet test = dataFolds.get(f); cl.train(train, parallel); for(int i = 0; i < test.size(); i++)//evaluate and mark each point in the held out fold. 
{ CategoricalResults pred = cl.classify(test.getDataPoint(i)); if(C == 2) metaSet.getDataPoint(pos).getNumericalValues().set(c, pred.getProb(0)*2-1); else { Vec toSet = metaSet.getDataPoint(pos).getNumericalValues(); for(int j = weightsPerModel*c; j < weightsPerModel*(c+1); j++) toSet.set(j, pred.getProb(j-weightsPerModel*c)); } pos++; } } } //train the meta model aggregatingClassifier.train(metaSet, parallel); //train the final classifiers, unless folds=1. In that case they are already trained if(folds != 1) { for(Classifier cl : baseClassifiers) cl.train(dataSet, parallel); } } @Override public boolean supportsWeightedData() { if(aggregatingClassifier != null) return aggregatingClassifier.supportsWeightedData(); else return aggregatingRegressor.supportsWeightedData(); } @Override public double regress(DataPoint data) { Vec w = new DenseVector(baseRegressors.size()); for (int i = 0; i < baseRegressors.size(); i++) w.set(i, baseRegressors.get(i).regress(data)); return aggregatingRegressor.regress(new DataPoint(w)); } @Override public void train(RegressionDataSet dataSet, boolean parallel) { final int models = baseRegressors.size(); weightsPerModel = 1; RegressionDataSet metaSet = new RegressionDataSet(models, new CategoricalData[0]); List<RegressionDataSet> dataFolds = dataSet.cvSet(folds); //iterate in the order of the folds so we get the right dataum weights for (RegressionDataSet rds : dataFolds) for (int i = 0; i < rds.size(); i++) { metaSet.addDataPoint(new DataPoint(new DenseVector(weightsPerModel * models)), rds.getTargetValue(i)); metaSet.setWeight(i, rds.getWeight(i)); } //create the meta training set for(int c = 0; c < baseRegressors.size(); c++) { Regressor reg = baseRegressors.get(c); int pos = 0; for(int f = 0; f < dataFolds.size(); f++) { RegressionDataSet train = RegressionDataSet.comineAllBut(dataFolds, f); RegressionDataSet test = dataFolds.get(f); reg.train(train, parallel); for(int i = 0; i < test.size(); i++)//evaluate and mark each point in the held out fold. { double pred = reg.regress(test.getDataPoint(i)); metaSet.getDataPoint(pos++).getNumericalValues().set(c, pred); } } } //train the meta model aggregatingRegressor.train(metaSet, parallel); //train the final classifiers, unless folds=1. In that case they are already trained if(folds != 1) { for(Regressor reg : baseRegressors) reg.train(dataSet, parallel); } } @Override public Stacking clone() { return new Stacking(this); } }
14,141
37.851648
139
java
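A sketch of building a stacked ensemble with the class above. LinearBatch is the aggregator suggested by the class documentation; its no-argument constructor is an assumption here, and the two base classifiers (the constructor requires at least two) are supplied by the caller. The wrapper class is illustrative only.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.boosting.Stacking;
import jsat.classifiers.linear.LinearBatch;

//hypothetical helper class, not part of JSAT
public class StackingExample
{
    /**
     * Builds a stacked ensemble from two already-constructed base classifiers.
     * The aggregating LinearBatch model effectively learns a weighted vote over
     * the base models' probability outputs.
     */
    public static Stacking demo(ClassificationDataSet train, Classifier baseA, Classifier baseB)
    {
        Stacking stack = new Stacking(3, new LinearBatch(), baseA, baseB);//3 CV folds
        stack.train(train, true);
        return stack;
    }
}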
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/UpdatableStacking.java
package jsat.classifiers.boosting; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import jsat.classifiers.*; import jsat.classifiers.linear.LinearBatch; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.regression.BaseUpdateableRegressor; import jsat.regression.RegressionDataSet; import jsat.regression.UpdateableRegressor; /** * This provides an implementation of the Stacking ensemble method meant for * Updatable models. Stacking learns several base classifiers and a top level * classifier learns to predict the target based on the outputs of all the * ensambled models. Historically a linear model (such as {@link LinearBatch}) * is used, which translates to learning a weighted vote of the classifier * outputs. However any classifier may be used so long as it supports the * desired target type. <br> * <br> * Note, that Stacking tends to work best when the base classifiers produce * reasonable probability estimates. <br> * Stacking supports {@link #supportsWeightedData() weighted data instances} if * the aggregating model does. * <br> * See: Wolpert, D. H. (1992). Stacked generalization. Neural Networks, 5, 241–259. * * @author Edward Raff */ public class UpdatableStacking implements UpdateableClassifier, UpdateableRegressor { /* * TODO should investigate providing a 'skip' paramter, as the first few * predictions from the base models are going to be rubbish. So let them * settle in a littl ebefore we start updating the aggregator off their * predictions */ private static final long serialVersionUID = -5111303510263114862L; /** * The number of weights needed per model */ private int weightsPerModel; private UpdateableClassifier aggregatingClassifier; private List<UpdateableClassifier> baseClassifiers; private UpdateableRegressor aggregatingRegressor; private List<UpdateableRegressor> baseRegressors; /** * Creates a new Stacking classifier * @param aggregatingClassifier the classifier used to merge the results of all the input classifiers * @param baseClassifiers the list of base classifiers to ensemble */ public UpdatableStacking(UpdateableClassifier aggregatingClassifier, List<UpdateableClassifier> baseClassifiers) { if(baseClassifiers.size() < 2) throw new IllegalArgumentException("base classifiers must contain at least 2 elements, not " + baseClassifiers.size()); this.aggregatingClassifier = aggregatingClassifier; this.baseClassifiers = baseClassifiers; boolean allRegressors = aggregatingClassifier instanceof UpdateableRegressor; for(UpdateableClassifier cl : baseClassifiers) if(!(cl instanceof UpdateableRegressor)) allRegressors = false; if(allRegressors) { aggregatingRegressor = (UpdateableRegressor) aggregatingClassifier; baseRegressors = (List) baseClassifiers;//ugly type easure exploitation... } } /** * Creates a new Stacking classifier. * @param aggregatingClassifier the classifier used to merge the results of all the input classifiers * @param baseClassifiers the array of base classifiers to ensemble */ public UpdatableStacking(UpdateableClassifier aggregatingClassifier, UpdateableClassifier... 
baseClassifiers) { this(aggregatingClassifier, Arrays.asList(baseClassifiers)); } /** * Creates a new Stacking regressor * @param aggregatingRegressor the regressor used to merge the results of all the input classifiers * @param baseRegressors the list of base regressors to ensemble */ public UpdatableStacking(UpdateableRegressor aggregatingRegressor, List<UpdateableRegressor> baseRegressors) { this.aggregatingRegressor = aggregatingRegressor; this.baseRegressors = baseRegressors; boolean allClassifiers = aggregatingRegressor instanceof UpdateableClassifier; for(UpdateableRegressor reg : baseRegressors) if(!(reg instanceof UpdateableClassifier)) allClassifiers = false; if(allClassifiers) { aggregatingClassifier = (UpdateableClassifier) aggregatingRegressor; baseClassifiers = (List) baseRegressors;//ugly type easure exploitation... } } /** * Creates a new Stacking regressor. * @param aggregatingRegressor the regressor used to merge the results of all the input classifiers * @param baseRegressors the array of base regressors to ensemble */ public UpdatableStacking(UpdateableRegressor aggregatingRegressor, UpdateableRegressor... baseRegressors) { this(aggregatingRegressor, Arrays.asList(baseRegressors)); } /** * Copy constructor * @param toCopy the object to copy */ public UpdatableStacking(UpdatableStacking toCopy) { this.weightsPerModel = toCopy.weightsPerModel; if(toCopy.aggregatingClassifier != null) { this.aggregatingClassifier = toCopy.aggregatingClassifier.clone(); this.baseClassifiers = new ArrayList<UpdateableClassifier>(toCopy.baseClassifiers.size()); for(UpdateableClassifier bc : toCopy.baseClassifiers) this.baseClassifiers.add(bc.clone()); if(toCopy.aggregatingRegressor == toCopy.aggregatingClassifier)//supports both { aggregatingRegressor = (UpdateableRegressor) aggregatingClassifier; baseRegressors = (List) baseClassifiers;//ugly type easure exploitation... 
} } else//we are doing with regressors only { this.aggregatingRegressor = toCopy.aggregatingRegressor.clone(); this.baseRegressors = new ArrayList<UpdateableRegressor>(toCopy.baseRegressors.size()); for(UpdateableRegressor br : toCopy.baseRegressors) this.baseRegressors.add(br.clone()); } } @Override public CategoricalResults classify(DataPoint data) { return aggregatingClassifier.classify(getPredVecC(data)); } /** * Gets the predicted vector wrapped in a new DataPoint from a data point * assuming we are doing classification * @param data the data point to get the classifier from * @return the vector of predictions from each classifier */ private DataPoint getPredVecC(DataPoint data) { Vec w = new DenseVector(weightsPerModel*baseClassifiers.size()); if(weightsPerModel == 1) for(int i = 0; i < baseClassifiers.size(); i++) w.set(i, baseClassifiers.get(i).classify(data).getProb(0)*2-1); else { for(int i = 0; i < baseClassifiers.size(); i++) { CategoricalResults pred = baseClassifiers.get(i).classify(data); for(int j = 0; j < weightsPerModel; j++) w.set(i*weightsPerModel+j, pred.getProb(j)); } } return new DataPoint(w); } /** * Gets the predicted vector wrapped in a new DataPoint from a data point * assuming we are doing regression * @param data the data point to get the classifier from * @return the vector of predictions from each regressor */ private DataPoint getPredVecR(DataPoint data) { Vec w = new DenseVector(baseRegressors.size()); for (int i = 0; i < baseRegressors.size(); i++) w.set(i, baseRegressors.get(i).regress(data)); return new DataPoint(w); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { final int C = predicting.getNumOfCategories(); weightsPerModel = C == 2 ? 1 : C; //set up all models, agregating gets different arugmetns since it gets the created input from the base models aggregatingClassifier.setUp(new CategoricalData[0], weightsPerModel*baseClassifiers.size(), predicting); for(UpdateableClassifier uc : baseClassifiers) uc.setUp(categoricalAttributes, numericAttributes, predicting); } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { //predate first, gives an unbiased udpdate for the aggregator aggregatingClassifier.update(getPredVecC(dataPoint), weight, targetClass); //now update the base models for(UpdateableClassifier uc : baseClassifiers) uc.update(dataPoint, targetClass); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes) { weightsPerModel = 1; aggregatingRegressor.setUp(new CategoricalData[0], weightsPerModel*baseRegressors.size()); for(UpdateableRegressor ur : baseRegressors) ur.setUp(categoricalAttributes, numericAttributes); } @Override public void update(DataPoint dataPoint, double weight, double targetValue) { //predate first, gives an unbiased udpdate for the aggregator aggregatingRegressor.update(getPredVecR(dataPoint), weight, targetValue); //now update the base models for(UpdateableRegressor ur : baseRegressors) ur.update(dataPoint, targetValue); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { BaseUpdateableClassifier.trainEpochs(dataSet, this, 1); } @Override public boolean supportsWeightedData() { if(aggregatingClassifier != null) return aggregatingClassifier.supportsWeightedData(); else return aggregatingRegressor.supportsWeightedData(); } @Override public double regress(DataPoint data) { return 
aggregatingRegressor.regress(getPredVecR(data)); } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { BaseUpdateableRegressor.trainEpochs(dataSet, this, 1); } @Override public UpdatableStacking clone() { return new UpdatableStacking(this); } }
10,558
36.576512
131
java
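An online-learning sketch for UpdatableStacking above. The aggregator and base models are any UpdateableClassifier implementations supplied by the caller; getCategories and getNumNumericalVars are assumed to be the standard data set accessors of this library, and the wrapper class is illustrative only.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.UpdateableClassifier;
import jsat.classifiers.boosting.UpdatableStacking;

//hypothetical helper class, not part of JSAT
public class UpdatableStackingExample
{
    /**
     * Online usage: setUp is called once, then update is fed one labeled point
     * at a time, so the aggregator always sees base predictions made before the
     * base models were updated on that point.
     */
    public static UpdatableStacking demo(ClassificationDataSet stream,
            UpdateableClassifier aggregator, UpdateableClassifier baseA, UpdateableClassifier baseB)
    {
        UpdatableStacking stack = new UpdatableStacking(aggregator, baseA, baseB);
        stack.setUp(stream.getCategories(), stream.getNumNumericalVars(), stream.getPredicting());
        for (int i = 0; i < stream.size(); i++)
            stack.update(stream.getDataPoint(i), 1.0, stream.getDataPointCategory(i));
        return stack;
    }
}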
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/Wagging.java
package jsat.classifiers.boosting; import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.distributions.ContinuousDistribution; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.parameters.Parameterized; import jsat.regression.RegressionDataSet; import jsat.regression.Regressor; import jsat.utils.FakeExecutor; import jsat.utils.SystemInfo; import jsat.utils.concurrent.ParallelUtils; import jsat.utils.random.RandomUtil; /** * Wagging is a meta-classifier that is related to {@link Bagging}. Instead * training on re-sampled data sets, it trains on randomly re-weighted data * sets. The weight of each point is selected at random from a specified * distribution, and set to zero if negative. * <br><br> * See: <a href="http://www.springerlink.com/index/L006M1614W023752.pdf"> * Bauer, E.,&amp;Kohavi, R. (1999). <i>An empirical comparison of voting * classification algorithms</i>: Bagging, boosting, and variants. Machine * learning, 38(1998), 1–38.</a> * * @author Edward Raff */ public class Wagging implements Classifier, Regressor, Parameterized { private static final long serialVersionUID = 4999034730848794619L; private ContinuousDistribution dist; private int iterations; private Classifier weakL; private Regressor weakR; private CategoricalData predicting; private Classifier[] hypotsL; private Regressor[] hypotsR; /** * Creates a new Wagging classifier * @param dist the distribution to select weights from * @param weakL the weak learner to use * @param iterations the number of iterations to perform */ public Wagging(ContinuousDistribution dist, Classifier weakL, int iterations) { setDistribution(dist); setIterations(iterations); setWeakLearner(weakL); } /** * Creates a new Wagging regressor * @param dist the distribution to select weights from * @param weakR the weak learner to use * @param iterations the number of iterations to perform */ public Wagging(ContinuousDistribution dist, Regressor weakR, int iterations) { setDistribution(dist); setIterations(iterations); setWeakLearner(weakR); } /** * Copy constructor * @param clone the one to clone */ protected Wagging(Wagging clone) { this.dist = clone.dist.clone(); this.iterations = clone.iterations; if(clone.weakL != null) setWeakLearner(clone.weakL.clone()); if(clone.weakR != null) setWeakLearner(clone.weakR.clone()); if(clone.predicting != null) this.predicting = clone.predicting.clone(); if(clone.hypotsL != null) { hypotsL = new Classifier[clone.hypotsL.length]; for(int i = 0; i < hypotsL.length; i++) hypotsL[i] = clone.hypotsL[i].clone(); } if(clone.hypotsR != null) { hypotsR = new Regressor[clone.hypotsR.length]; for(int i = 0; i < hypotsR.length; i++) hypotsR[i] = clone.hypotsR[i].clone(); } } /** * Sets the weak learner used for classification. If it also supports * regressions that will be set as well. * @param weakL the weak learner to use */ public void setWeakLearner(Classifier weakL) { if(weakL == null) throw new NullPointerException(); this.weakL = weakL; if(weakL instanceof Regressor) this.weakR = (Regressor) weakL; } /** * Returns the weak learner used for classification. * @return the weak learner used for classification. 
*/ public Classifier getWeakClassifier() { return weakL; } /** * Sets the weak learner used for regressions . If it also supports * classification that will be set as well. * @param weakR the weak learner to use */ public void setWeakLearner(Regressor weakR) { if(weakR == null) throw new NullPointerException(); this.weakR = weakR; if(weakR instanceof Classifier) this.weakL = (Classifier) weakR; } /** * Returns the weak learner used for regression * @return the weak learner used for regression */ public Regressor getWeakRegressor() { return weakR; } /** * Sets the number of iterations to create weak learners * @param iterations the number of iterations to perform */ public void setIterations(int iterations) { if(iterations < 1) throw new ArithmeticException("The number of iterations must be positive"); this.iterations = iterations; } /** * Returns the number of iterations to create weak learners * @return the number of iterations to perform */ public int getIterations() { return iterations; } /** * Sets the distribution to select the random weights from * @param dist the distribution to use */ public void setDistribution(ContinuousDistribution dist) { if(dist == null) throw new NullPointerException(); this.dist = dist; } /** * Returns the distribution used for weight sampling * @return the distribution used */ public ContinuousDistribution getDistribution() { return dist; } /** * Fills a subset of the array */ private class WagFill implements Runnable { int start; int end; DataSet ds; Random rand; CountDownLatch latch; public WagFill(int start, int end, DataSet ds, Random rand, CountDownLatch latch) { this.start = start; this.end = end; this.ds = ds.shallowClone(); this.rand = rand; this.latch = latch; //point at different objects so we can adjsut weights independently for(int i = 0; i < this.ds.size(); i++) { DataPoint dp = this.ds.getDataPoint(i); this.ds.setDataPoint(i, new DataPoint(dp.getNumericalValues(), dp.getCategoricalValues(), dp.getCategoricalData())); } } @Override public void run() { if (ds instanceof ClassificationDataSet) { ClassificationDataSet cds = (ClassificationDataSet) ds; for (int i = start; i < end; i++) { for (int j = 0; j < ds.size(); j++) { double newWeight = Math.max(1e-6, dist.invCdf(rand.nextDouble())); cds.setWeight(j, newWeight); } Classifier hypot = weakL.clone(); hypot.train(cds); hypotsL[i] = hypot; } } else if(ds instanceof RegressionDataSet) { RegressionDataSet rds = (RegressionDataSet) ds; for (int i = start; i < end; i++) { for (int j = 0; j < ds.size(); j++) ds.setWeight(i, Math.max(1e-6, dist.invCdf(rand.nextDouble()))); Regressor hypot = weakR.clone(); hypot.train(rds); hypotsR[i] = hypot; } } else throw new RuntimeException("BUG: please report"); latch.countDown(); } } private void performTraining(boolean parallel, DataSet dataSet) { ExecutorService threadPool = ParallelUtils.getNewExecutor(parallel); int chunkSize = iterations/SystemInfo.LogicalCores; int extra = iterations%SystemInfo.LogicalCores; int used = 0; Random rand = RandomUtil.getRandom(); CountDownLatch latch = new CountDownLatch(chunkSize > 0 ? 
SystemInfo.LogicalCores : extra); while(used < iterations) { int start = used; int end = start+chunkSize; if(extra-- > 0) end++; used = end; threadPool.submit(new WagFill(start, end, dataSet, new Random(rand.nextInt()), latch)); } try { latch.await(); } catch (InterruptedException ex) { throw new FailedToFitException(ex); } finally { threadPool.shutdownNow(); } } @Override public CategoricalResults classify(DataPoint data) { if(hypotsL == null) throw new UntrainedModelException("Model has not been trained for classification"); CategoricalResults results = new CategoricalResults(predicting.getNumOfCategories()); for(Classifier hypot : hypotsL) results.incProb(hypot.classify(data).mostLikely(), 1); results.normalize(); return results; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { if(weakL == null) throw new FailedToFitException("No classification weak learner was provided"); predicting = dataSet.getPredicting(); hypotsL = new Classifier[iterations]; hypotsR = null; performTraining(parallel, dataSet); } @Override public boolean supportsWeightedData() { return false; } @Override public double regress(DataPoint data) { if(hypotsR == null) throw new UntrainedModelException("Model has not been trained for regression"); double avg = 0.0; for(Regressor hypot : hypotsR) avg += hypot.regress(data); avg /= hypotsR.length; return avg; } @Override public void train(RegressionDataSet dataSet, boolean parallel) { if(weakR == null) throw new FailedToFitException("No regression weak learner was provided"); hypotsL = null; hypotsR = new Regressor[iterations]; performTraining(parallel, dataSet); } @Override public Wagging clone() { return new Wagging(this); } }
10,669
29.485714
132
java
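A sketch of Wagging with an explicit weight distribution. Normal(1, 2) mirrors the default used by the WaggingNormal subclass that follows; the weak learner is supplied by the caller, and the wrapper class is illustrative only.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.boosting.Wagging;
import jsat.distributions.Normal;

//hypothetical helper class, not part of JSAT
public class WaggingExample
{
    /**
     * Each iteration every point's weight is redrawn from Normal(1, 2), with
     * non-positive draws clipped to a tiny value, and a fresh copy of the weak
     * learner is trained on the re-weighted data.
     */
    public static Wagging demo(ClassificationDataSet train, Classifier weak)
    {
        Wagging wag = new Wagging(new Normal(1, 2), weak, 50);
        wag.train(train, true);//iterations are trained in parallel
        return wag;
    }
}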
JSAT
JSAT-master/JSAT/src/jsat/classifiers/boosting/WaggingNormal.java
package jsat.classifiers.boosting;

import jsat.classifiers.Classifier;
import jsat.distributions.ContinuousDistribution;
import jsat.distributions.Normal;
import jsat.regression.Regressor;

/**
 * Wagging using the {@link Normal} distribution.
 *
 * @author Edward Raff
 */
public class WaggingNormal extends Wagging
{
    private static final long serialVersionUID = -4149453672311329863L;

    /**
     * Creates a new Wagging classifier
     * @param weakLearner the weak learner to use
     * @param iterations the number of iterations to perform
     */
    public WaggingNormal(Classifier weakLearner, int iterations)
    {
        super(new Normal(1, 2), weakLearner, iterations);
    }

    /**
     * Creates a new Wagging regressor
     * @param weakLearner the weak learner to use
     * @param iterations the number of iterations to perform
     */
    public WaggingNormal(Regressor weakLearner, int iterations)
    {
        super(new Normal(1, 2), weakLearner, iterations);
    }

    /**
     * Copy constructor
     * @param clone the object to copy
     */
    protected WaggingNormal(Wagging clone)
    {
        super(clone);
    }

    @Override
    public ContinuousDistribution getDistribution()
    {
        return super.getDistribution();
    }

    @Override
    public void setDistribution(ContinuousDistribution dist)
    {
        if(dist instanceof Normal)
            super.setDistribution(dist);
        else
            throw new RuntimeException("Only the Normal distribution is valid");
    }

    /**
     * Sets the mean value used for the normal distribution
     * @param mean the new mean value
     */
    public void setMean(double mean)
    {
        if(Double.isInfinite(mean) || Double.isNaN(mean))
            throw new ArithmeticException("Mean must be a real number, not " + mean);
        ((Normal)getDistribution()).setMean(mean);
    }

    /**
     * Returns the mean value used for the normal distribution
     * @return the mean value used
     */
    public double getMean()
    {
        return ((Normal)getDistribution()).mean();
    }

    /**
     * Sets the standard deviation used for the normal distribution
     * @param devs the standard deviation to use
     */
    public void setStandardDeviations(double devs)
    {
        if(devs <= 0 || Double.isInfinite(devs) || Double.isNaN(devs))
            throw new ArithmeticException("The standard deviation must be a positive value");
        ((Normal)getDistribution()).setStndDev(devs);
    }

    /**
     * Returns the standard deviation used for the normal distribution
     * @return the standard deviation used
     */
    public double getStandardDeviations()
    {
        return ((Normal)getDistribution()).standardDeviation();
    }

    @Override
    public WaggingNormal clone()
    {
        return new WaggingNormal(this);
    }
}
2,871
25.592593
85
java
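A short sketch of the WaggingNormal convenience subclass above, adjusting only the spread of the sampled weights; the weak learner is supplied by the caller and the wrapper class is illustrative only.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.Classifier;
import jsat.classifiers.boosting.WaggingNormal;

//hypothetical helper class, not part of JSAT
public class WaggingNormalExample
{
    /**
     * The Normal(1, 2) weight distribution is built in; only its standard
     * deviation is adjusted here before training.
     */
    public static WaggingNormal demo(ClassificationDataSet train, Classifier weak)
    {
        WaggingNormal wag = new WaggingNormal(weak, 50);
        wag.setStandardDeviations(1.5);//smaller spread keeps the weights closer to 1
        wag.train(train);
        return wag;
    }
}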
JSAT
JSAT-master/JSAT/src/jsat/classifiers/calibration/BinaryCalibration.java
package jsat.classifiers.calibration; import java.util.Collections; import java.util.List; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.DataPointPair; import jsat.parameters.Parameter.ParameterHolder; import jsat.parameters.Parameterized; /** * This abstract class provides the frame work for an algorithm to perform * probability calibration based on the outputs of a base learning algorithm for * binary classification problems. * <br><br> * Calibration can be performed directly on output values, though it may cause * over-fitting. For this reason, the {@link CalibrationMode} may be set to an * alternative method. * <br><br> * The parameters include the calibration parameters, and any parameters that * would be returned by the base model. * * @author Edward Raff */ public abstract class BinaryCalibration implements Classifier, Parameterized { private static final long serialVersionUID = 2356311701854978890L; /** * The base classifier to train and calibrate the outputs of */ @ParameterHolder protected BinaryScoreClassifier base; /** * The number of CV folds */ protected int folds = 3; /** * The proportion of the data set to hold out for calibration */ protected double holdOut = 0.3; /** * The calibration mode to use */ protected CalibrationMode mode; /** * Creates a new Binary Calibration object * @param base the base learning algorithm * @param mode the calibration mode to use */ public BinaryCalibration(BinaryScoreClassifier base, CalibrationMode mode) { this.base = base; setCalibrationMode(mode); } /** * Controls how the scores are obtained for producing a "training set" to * calibrate the output of the underlying model. */ public static enum CalibrationMode { /** * The naive methods trains the classifier on the whole data set, and * then produces the scores for each training point. This may cause * over fitting. */ NAIVE, /** * The model will be trained by cross validation, using the specified * number of {@link #setCalibrationFolds(int) }. The default is 3 folds, * where the classifier will be trained on the folds not in the set, and * then produce scores for the unobserved test points in the held out * fold. */ CV, /** * The model will have a random {@link #setCalibrationHoldOut(double) * fraction} of the data set held out, and trained on the rest of the * data. The scores will then be produced for the held out data and used * for calibration. */ HOLD_OUT, } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { double[] deci = new double[dataSet.size()];//array of SVM decision values boolean[] label = new boolean[deci.length];//array of booleans: is the example labeled +1? 
int len = label.length; if (mode == CalibrationMode.CV) { List<ClassificationDataSet> foldList = dataSet.cvSet(folds); int pos = 0; for(int i = 0; i < foldList.size(); i++) { ClassificationDataSet test = foldList.get(i); ClassificationDataSet train = ClassificationDataSet.comineAllBut(foldList, i); base.train(train, parallel); for(int j = 0; j < test.size(); j++) { deci[pos] = base.getScore(test.getDataPoint(j)); label[pos] = test.getDataPointCategory(j) == 1; pos++; } } base.train(dataSet, parallel); } else if (mode == CalibrationMode.HOLD_OUT) { List<DataPointPair<Integer>> wholeSet = dataSet.getAsDPPList(); Collections.shuffle(wholeSet); int splitMark = (int) (wholeSet.size()*(1-holdOut)); ClassificationDataSet train = new ClassificationDataSet(wholeSet.subList(0, splitMark), dataSet.getPredicting()); ClassificationDataSet test = new ClassificationDataSet(wholeSet.subList(splitMark, wholeSet.size()), dataSet.getPredicting()); base.train(train, parallel); for(int i = 0; i < test.size(); i++) { deci[i] = base.getScore(test.getDataPoint(i)); label[i] = test.getDataPointCategory(i) == 1; } len = test.size(); base.train(dataSet, parallel); } else { base.train(dataSet, parallel); for (int i = 0; i < len; i++) { DataPoint dp = dataSet.getDataPoint(i); deci[i] = base.getScore(dp); label[i] = dataSet.getDataPointCategory(i) == 1; } } calibrate(label, deci, len); } /** * This method perform the model calibration on the outputs verse the class * labels. * @param label the set of labels, where {@code true} indicates the positive * class label, and {@code false} indicates the negative class label. * @param scores the score associated with each label from the learning * algorithm. * @param len the number of values (from zero) of the label and scores array * to use. This value may be less than the actual array size */ abstract protected void calibrate(boolean[] label, double[] scores, final int len); /** * If the calibration mode is set to {@link CalibrationMode#CV}, this * controls how many folds of cross validation will be used. The default is * 3. * @param folds the number of cross validation folds to perform */ public void setCalibrationFolds(int folds) { if(folds < 1) throw new IllegalArgumentException("Folds must be a positive value, not " + folds); this.folds = folds; } /** * Returns the number of cross validation folds to use * @return the number of cross validation folds to use */ public int getCalibrationFolds() { return folds; } /** * If the calibration mode is set to {@link CalibrationMode#HOLD_OUT}, this * what portion of the data set is randomly selected to be the hold out set. * The default is 0.3. * * @param holdOut the portion in (0, 1) to hold out */ public void setCalibrationHoldOut(double holdOut) { if(Double.isNaN(holdOut) || holdOut <= 0 || holdOut >= 1) throw new IllegalArgumentException("HoldOut must be in (0, 1), not " + holdOut); this.holdOut = holdOut; } /** * Returns the portion of the data set that will be held out for calibration * @return the portion of the data set that will be held out for calibration */ public double getCalibrationHoldOut() { return holdOut; } /** * Sets which calibration mode will be used during training * @param mode the calibration mode to use during training. 
*/ public void setCalibrationMode(CalibrationMode mode) { this.mode = mode; } /** * Returns the calibration mode used during training * @return the calibration mode used during training */ public CalibrationMode getCalibrationMode() { return mode; } @Override abstract public BinaryCalibration clone(); }
7,807
32.947826
138
java
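A sketch of choosing a calibration mode through a concrete subclass of BinaryCalibration. PlattCalibration appears later in this package and ModestAdaBoost (shown earlier) implements BinaryScoreClassifier, so those calls are grounded in this source; DecisionStump as the weak learner and the wrapper class are assumptions for illustration.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.boosting.ModestAdaBoost;
import jsat.classifiers.calibration.BinaryCalibration.CalibrationMode;
import jsat.classifiers.calibration.PlattCalibration;
import jsat.classifiers.trees.DecisionStump;

//hypothetical helper class, not part of JSAT
public class CalibrationModeExample
{
    /**
     * Wraps a score-producing model and calibrates it with 5-fold cross
     * validation, so the calibration scores are never produced on the same
     * data the base model was fit on.
     */
    public static PlattCalibration demo(ClassificationDataSet binaryTrain)
    {
        ModestAdaBoost base = new ModestAdaBoost(new DecisionStump(), 200);
        PlattCalibration cal = new PlattCalibration(base, CalibrationMode.CV);
        cal.setCalibrationFolds(5);
        cal.train(binaryTrain);
        return cal;
    }
}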
JSAT
JSAT-master/JSAT/src/jsat/classifiers/calibration/BinaryScoreClassifier.java
package jsat.classifiers.calibration;

import jsat.classifiers.Classifier;
import jsat.classifiers.DataPoint;

/**
 * Many algorithms learn a binary separation between two classes <i>A</i> and
 * <i>B</i> by representing the target labels with a {@code -1} and {@code 1}. At
 * prediction time, the output is a real valued number, where the sign indicates
 * the class label. This interface indicates that an algorithm conforms to such
 * behavior, and that the "0" class corresponds to the {@code -1} label, and the
 * "1" class corresponds to the {@code 1} label. <br>
 *
 * @author Edward Raff
 */
public interface BinaryScoreClassifier extends Classifier
{
    /**
     * Returns the numeric score for predicting a class of a given data point,
     * where the sign of the value indicates which class the data point is
     * predicted to belong to.
     *
     * @param dp the data point to predict the class label of
     * @return the score for the given data point
     */
    public double getScore(DataPoint dp);

    @Override
    public BinaryScoreClassifier clone();
}
1,086
32.96875
80
java
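A toy, hypothetical implementation of the interface above, written only to illustrate the sign convention; it is not part of the library. It scores a point by the mean of its numeric features relative to a threshold learned as the overall mean.

import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.calibration.BinaryScoreClassifier;

/**
 * Illustrative toy scorer, not part of JSAT. Illustrates the contract: a
 * negative score means class 0, a positive score means class 1.
 */
public class MeanThresholdScorer implements BinaryScoreClassifier
{
    private double threshold;

    @Override
    public double getScore(DataPoint dp)
    {
        return dp.getNumericalValues().mean() - threshold;
    }

    @Override
    public CategoricalResults classify(DataPoint data)
    {
        CategoricalResults cr = new CategoricalResults(2);
        cr.setProb(getScore(data) > 0 ? 1 : 0, 1.0);
        return cr;
    }

    @Override
    public void train(ClassificationDataSet dataSet, boolean parallel)
    {
        threshold = 0;
        for (int i = 0; i < dataSet.size(); i++)
            threshold += dataSet.getDataPoint(i).getNumericalValues().mean();
        threshold /= dataSet.size();
    }

    @Override
    public boolean supportsWeightedData()
    {
        return false;
    }

    @Override
    public MeanThresholdScorer clone()
    {
        MeanThresholdScorer copy = new MeanThresholdScorer();
        copy.threshold = this.threshold;
        return copy;
    }
}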
JSAT
JSAT-master/JSAT/src/jsat/classifiers/calibration/IsotonicCalibration.java
package jsat.classifiers.calibration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import jsat.classifiers.CategoricalResults; import jsat.classifiers.DataPoint; /** * Isotonic Calibration is non-parametric, and only assumes that the underlying * distribution from negative to positive examples is strictly a non-decreasing * function. It will then attempt to model the distribution. This may over-fit * for small data sizes, and imposes an additional <i>O(log n)</i> search look * up when performing classification, where n is &lt;= the number of data points * in the data set. * <br><br> * Isotonic Calibration inherently creates non-adjacent bins of varying size. * Smooth transitions in output probability are created by simple linear * interpolation between bin values. * <br><br> * See: Niculescu-Mizil, A.,&amp;Caruana, R. (2005). <i>Predicting Good * Probabilities with Supervised Learning</i>. International Conference on * Machine Learning (pp. 625–632). Retrieved from * <a href="http://dl.acm.org/citation.cfm?id=1102430">here</a> * * @author Edward Raff */ public class IsotonicCalibration extends BinaryCalibration { private static final long serialVersionUID = -1295979238755262335L; private double[] outputs; private double[] scores; /** * Creates a new Isotonic Calibration object * @param base the base model to calibrate the outputs of * @param mode the calibration mode to use */ public IsotonicCalibration(BinaryScoreClassifier base, CalibrationMode mode) { super(base, mode); } /** * Copy constructor * @param toCopy the object to copy */ protected IsotonicCalibration(IsotonicCalibration toCopy) { super(toCopy.base.clone(), toCopy.mode); if (toCopy.outputs != null) this.outputs = Arrays.copyOf(toCopy.outputs, toCopy.outputs.length); if (toCopy.scores != null) this.scores = Arrays.copyOf(toCopy.scores, toCopy.scores.length); } private static class Point implements Comparable<Point> { public double weight; public double score; public double output; public double min, max; public Point(double score, double output) { this.weight = 1; min = max = this.score = score; this.output = output; } public void merge(Point next) { double newWeight = this.weight +next.weight; this.score = (this.weight*this.score + next.weight*next.score)/newWeight; this.output = (this.weight*this.output + next.weight*next.output)/newWeight; this.weight = newWeight; this.min = Math.min(this.min, next.min); this.max = Math.max(this.max, next.max); } public boolean nextViolates(Point next) { return this.output >= next.output; } @Override public int compareTo(Point o) { return Double.compare(score, o.score); } } @Override protected void calibrate(boolean[] label, double[] deci, int len) { List<Point> points = new ArrayList<Point>(len); for(int i = 0; i < len; i++) points.add(new Point(deci[i], label[i] ? 
1 : 0)); Collections.sort(points); boolean violators = true; while(violators) { violators = false; for(int i = 0; i < points.size()-1; i++) { if(points.get(i).nextViolates(points.get(i+1))) { violators = true; points.get(i).merge(points.remove(i+1)); i--; } } } scores = new double[points.size()*2]; outputs = new double[points.size()*2]; int pos = 0; for(Point p : points) { scores[pos] = p.min; outputs[pos++] = p.output; scores[pos] = p.max; outputs[pos++] = p.output; } } @Override public IsotonicCalibration clone() { return new IsotonicCalibration(this); } @Override public CategoricalResults classify(DataPoint data) { double score = base.getScore(data); CategoricalResults cr = new CategoricalResults(2); int indx = Arrays.binarySearch(scores, score); if(indx < 0) indx = (-(indx) - 1); if(indx == scores.length) { double maxScore = scores[scores.length-1]; if(score > maxScore*3) cr.setProb(1, 1.0); else { double p = (maxScore*3-score)/(maxScore*2)*outputs[scores.length-1]; cr.setProb(0, 1-p); cr.setProb(1, p); } } else if(indx == 0) { double minScore = scores[0]; if(score < minScore/3) cr.setProb(0, 1.0); else { double p = (minScore-score)/(minScore-minScore/3)*outputs[0]; cr.setProb(0, 1-p); cr.setProb(1, p); } } else { double score0 = scores[indx-1]; double score1 = scores[indx]; if(score0 == score1) { cr.setProb(0, 1-outputs[indx]); cr.setProb(1, outputs[indx]); return cr; } double weight = (score1-score)/(score1-score0); double p = outputs[indx-1]*weight + outputs[indx]*(1-weight); cr.setProb(0, 1-p); cr.setProb(1, p); } return cr; } @Override public boolean supportsWeightedData() { return base.supportsWeightedData(); } }
6,031
28.714286
88
java
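A usage sketch for IsotonicCalibration above with a 30% random hold-out calibration split. ModestAdaBoost (shown earlier) supplies the raw scores; DecisionStump as its weak learner and the wrapper class are assumptions for illustration.

import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.boosting.ModestAdaBoost;
import jsat.classifiers.calibration.BinaryCalibration.CalibrationMode;
import jsat.classifiers.calibration.IsotonicCalibration;
import jsat.classifiers.trees.DecisionStump;

//hypothetical helper class, not part of JSAT
public class IsotonicCalibrationExample
{
    /**
     * Calibrates boosted scores on a held-out split and returns the calibrated
     * P(y = 1 | x) for one query point.
     */
    public static double demo(ClassificationDataSet binaryTrain, DataPoint query)
    {
        IsotonicCalibration cal = new IsotonicCalibration(
                new ModestAdaBoost(new DecisionStump(), 200), CalibrationMode.HOLD_OUT);
        cal.setCalibrationHoldOut(0.3);//30% of the data is held out for calibration
        cal.train(binaryTrain);
        CategoricalResults cr = cal.classify(query);
        return cr.getProb(1);
    }
}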
JSAT
JSAT-master/JSAT/src/jsat/classifiers/calibration/PlattCalibration.java
package jsat.classifiers.calibration; import jsat.classifiers.CategoricalResults; import jsat.classifiers.DataPoint; import static jsat.math.FastMath.*; /** * Platt Calibration essentially performs logistic regression on the output * scores of a model against their class labels. While first described for SVMs, * Platt's method can be used for any scoring algorithm in general. * <br><br> * See:<br> * <ul> * <li>Platt, J. C. (1999). <i>Probabilistic Outputs for Support Vector * Machines and Comparisons to Regularized Likelihood Methods</i>. Advances in * Large Margin Classifiers (pp. 61–74). MIT Press. Retrieved from * <a href="http://www.tu-harburg.de/ti6/lehre/seminarCI/slides/ws0506/SVMprob.pdf"> * here </a></li> * <li>Lin, H.-T., Lin, C.-J.,&amp;Weng, R. C. (2007). <i>A note on Platt’s * probabilistic outputs for support vector machines</i>. Machine learning, * 68(3), 267–276. Retrieved from * <a href="http://www.springerlink.com/index/8417V9235M561471.pdf">here</a></li> * <li>Niculescu-Mizil, A.,&amp;Caruana, R. (2005). <i>Predicting Good * Probabilities with Supervised Learning</i>. International Conference on * Machine Learning (pp. 625–632). Retrieved from * <a href="http://dl.acm.org/citation.cfm?id=1102430">here</a></li> * </ul> * @author Edward Raff */ public class PlattCalibration extends BinaryCalibration { private static final long serialVersionUID = 1099230240231262536L; private double A,B; private double maxIter = 100; private double minStep = 1e-10; private double sigma = 1e-12; /** * Creates a new Platt Calibration object * @param base the base model to calibrate the outputs of * @param mode the calibration mode to use */ public PlattCalibration(BinaryScoreClassifier base, CalibrationMode mode) { super(base, mode); } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(2); double p_1 = 1/(1+exp(A*base.getScore(data)+B)); cr.setProb(0, 1-p_1); cr.setProb(1, p_1); return cr; } @Override protected void calibrate(boolean[] label, double[] deci, final int len) { int prior1 = 0;//number of positive examples for (boolean positive : label) if (positive) prior1++; final int prior0 = label.length - prior1;//number of negative examples final double hiTarget = (prior1 + 1.0) / (prior1 + 2.0); final double loTarget = 1 / (prior0 + 2.0); double[] t = new double[len]; for (int i = 0; i < len; i++) if (label[i]) t[i] = hiTarget; else t[i] = loTarget; A = 0.0; B = log((prior0 + 1.0) / (prior1 + 1.0)); double fval = 0.0; for(int i = 0; i < len; i++) { double fApB=deci[i]*A+B; if(fApB >= 0) fval += t[i]*fApB+log(1+exp(-fApB)); else fval += (t[i]-1)*fApB+log(1+exp(-fApB)); } for (int it = 0; it < maxIter; it++) { //Update Gradient and Hessian (use H’ = H + sigma I) double h11 = sigma, h22 = sigma; double h21 = 0, g1 = 0, g2 = 0.0; for(int i = 0; i < len; i++) { double fApB = deci[i] * A + B, p, q; if (fApB >= 0) { p = exp(-fApB) / (1.0 + exp(-fApB)); q = 1.0 / (1.0 + exp(-fApB)); } else { p = 1.0 / (1.0 + exp(fApB)); q = exp(fApB) / (1.0 + exp(fApB)); } double d2 = p * q; h11 += deci[i] * deci[i] * d2; h22 += d2; h21 += deci[i] * d2; double d1 = t[i] - p; g1 += deci[i] * d1; g2 += d1; } if (Math.abs(g1)<1e-5 && Math.abs(g2)<1e-5) //Stopping criteria break; //Compute modified Newton directions double det = h11 * h22 - h21 * h21; double dA = -(h22 * g1 - h21 * g2) / det; double dB = -(-h21 * g1 + h11 * g2) / det; double gd = g1 * dA + g2 * dB; double stepsize = 1; while (stepsize >= minStep)//Line search { double newA = A + stepsize * dA, 
newB = B + stepsize * dB, newf = 0.0; for (int i = 0; i < len; i++) { double fApB = deci[i] * newA + newB; if (fApB >= 0) newf += t[i] * fApB + log(1 + exp(-fApB)); else newf += (t[i] - 1) * fApB + log(1 + exp(fApB)); } if (newf < fval + 0.0001 * stepsize * gd) { A = newA; B = newB; fval = newf; break; //Sufficient decrease satisfied } else stepsize /= 2.0; } if (stepsize < minStep) break; } } @Override public boolean supportsWeightedData() { return base.supportsWeightedData(); } @Override public PlattCalibration clone() { PlattCalibration clone = new PlattCalibration(base.clone(), this.mode); clone.A = this.A; clone.B = this.B; clone.folds = this.folds; clone.holdOut = this.holdOut; clone.sigma = this.sigma; clone.minStep = this.minStep; clone.maxIter = this.maxIter; return clone; } }
5,926
31.565934
86
java
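The classify method above maps a raw classifier score s to a probability with the fitted sigmoid P(class 1) = 1/(1 + exp(A*s + B)). Below is a small illustrative sketch of that final step only; the class name and the A, B values are made up, since in practice they come out of the Newton/line-search loop in calibrate(...).

public class PlattSigmoidSketch
{
    /** Applies an already-fitted Platt sigmoid to a raw score, returning P(class 1). */
    public static double calibratedProbability(double score, double A, double B)
    {
        double fApB = A * score + B;
        //numerically stable evaluation of 1/(1+exp(fApB))
        if (fApB >= 0)
            return Math.exp(-fApB) / (1.0 + Math.exp(-fApB));
        else
            return 1.0 / (1.0 + Math.exp(fApB));
    }

    public static void main(String[] args)
    {
        double A = -1.5, B = 0.1;//hypothetical fitted values
        for (double s : new double[]{-2, -0.5, 0, 0.5, 2})
            System.out.printf("score=%5.2f  P(class 1)=%.4f%n", s, calibratedProbability(s, A, B));
    }
}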
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/AUC.java
package jsat.classifiers.evaluation; import java.util.ArrayList; import java.util.Collections; import java.util.List; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; /** * Computes the Area Under the ROC Curve as an evaluation of classification * scores. The AUC takes <i>O(n log n)</i> time for <i>n</i> predictions and is * only valid for binary classification problems. * * @author Edward Raff */ public class AUC implements ClassificationScore { private static final long serialVersionUID = 6882234590870560718L; private static class Tuple implements Comparable<Tuple> { /** * larger means positive class, smaller means negative class */ public double score; /** * Does this point truly belong to the positive class */ public boolean positiveClass; public double weight; public Tuple(double score, boolean positiveClass, double weight) { this.score = score; this.positiveClass = positiveClass; this.weight = weight; } @Override public int compareTo(Tuple o) { return Double.compare(this.score, o.score); } } private List<Tuple> scores; /** * Creates a new AUC object */ public AUC() { } /** * Copy constructor * @param toClone the object to copy */ public AUC(AUC toClone) { if(toClone.scores != null) { this.scores = new ArrayList<Tuple>(toClone.scores); for(int i = 0; i < this.scores.size(); i++) this.scores.set(i, new Tuple(this.scores.get(i).score, this.scores.get(i).positiveClass, this.scores.get(i).weight)); } } @Override public void addResult(CategoricalResults prediction, int trueLabel, double weight) { scores.add(new Tuple(prediction.getProb(0), trueLabel == 0, weight)); } @Override public void addResults(ClassificationScore other) { AUC otherObj = (AUC) other; this.scores.addAll(otherObj.scores); } @Override public void prepare(CategoricalData toPredict) { if(toPredict.getNumOfCategories() != 2) throw new IllegalArgumentException("AUC is only defined for binary classification problems"); scores = new ArrayList<Tuple>(); } @Override public double getScore() { Collections.sort(scores); double pos = 0, neg = 0, sum = 0; for (Tuple i : scores) if (i.positiveClass) pos += i.weight; else neg += i.weight; double posLeft = pos; for (Tuple i : scores) if (i.positiveClass)//oh no, saw the wrong thing posLeft -= i.weight; else//posLeft instances of the positive class were correctly above the negative class sum += posLeft; return sum / (double) (pos * neg); } @Override public boolean lowerIsBetter() { return false; } @Override public boolean equals(Object obj) { if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass())) { return true; } return false; } @Override public int hashCode() { return getName().hashCode(); } @Override public AUC clone() { return new AUC(this); } @Override public String getName() { return "AUC"; } }
3,658
23.557047
133
java
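The getScore() method above sorts the predictions and, walking the sorted list, counts how many positive examples are still ranked above each negative example. The sketch below shows the same pair-counting idea on plain arrays, unweighted for simplicity; the class name and sample data are invented for illustration.

import java.util.Arrays;
import java.util.Comparator;

public class AucSketch
{
    /** Fraction of (positive, negative) pairs where the positive example is scored higher. */
    public static double auc(double[] scores, boolean[] isPositive)
    {
        Integer[] order = new Integer[scores.length];
        for (int i = 0; i < order.length; i++)
            order[i] = i;
        Arrays.sort(order, Comparator.<Integer>comparingDouble(i -> scores[i]));//ascending, O(n log n)

        long pos = 0, neg = 0;
        for (boolean b : isPositive)
            if (b) pos++; else neg++;

        long posAbove = pos, orderedPairs = 0;
        for (int idx : order)//walking up the scores, positives not yet seen are ranked higher
            if (isPositive[idx])
                posAbove--;
            else
                orderedPairs += posAbove;

        return orderedPairs / (double) (pos * neg);
    }

    public static void main(String[] args)
    {
        double[] s = {0.9, 0.8, 0.4, 0.3};
        boolean[] y = {true, false, true, false};
        System.out.println(auc(s, y));//0.75, since 3 of the 4 positive/negative pairs are ordered correctly
    }
}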
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/Accuracy.java
package jsat.classifiers.evaluation; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; /** * Evaluates a classifier based on its accuracy in predicting the correct class. * * @author Edward Raff */ public class Accuracy implements ClassificationScore { private static final long serialVersionUID = 397690693205481128L; private double correct, total; public Accuracy() { } public Accuracy(Accuracy toClone) { this.correct = toClone.correct; this.total = toClone.total; } @Override public void addResult(CategoricalResults prediction, int trueLabel, double weight) { if(prediction.mostLikely() == trueLabel) correct += weight; total += weight; } @Override public void addResults(ClassificationScore other) { Accuracy otherObj = (Accuracy) other; this.correct += otherObj.correct; this.total += otherObj.total; } @Override public void prepare(CategoricalData toPredict) { correct = 0; total = 0; } @Override public double getScore() { return correct/total; } @Override public boolean lowerIsBetter() { return false; } @Override public boolean equals(Object obj) { if(obj instanceof Accuracy) { return true; } return false; } @Override public int hashCode() { return getName().hashCode(); } @Override public Accuracy clone() { return new Accuracy(this); } @Override public String getName() { return "Accuracy"; } }
1,721
17.923077
86
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/BalancedAccuracy.java
package jsat.classifiers.evaluation; import java.util.Arrays; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; /** * This class implements the Balanced Accuracy metric. If the number of test * points has an equal total weight for each class, then Balanced Accuracy * returns the same result as {@link Accuracy}. If not, this class will re-scale * the importance of errors in each class and return an accuracy score as if * each class had equal total weight. That is to say, it evaluates the data as * if it was balanced. <br> * <br> * See: Brodersen, K. H., Ong, C. S., Stephan, K. E., & * Buhmann, J. M. (2010). * <i>The Balanced Accuracy and Its Posterior Distribution</i>. In Proceedings * of the 2010 20th International Conference on Pattern Recognition (pp. * 3121–3124). Washington, DC, USA: IEEE Computer Society. * <a href="http://doi.org/10.1109/ICPR.2010.764">http://doi.org/10.1109/ICPR.2010.764</a> * @author Edward Raff */ public class BalancedAccuracy implements ClassificationScore { private int classes; double[] class_correct; double[] total_class_weight; public BalancedAccuracy() { } public BalancedAccuracy(BalancedAccuracy toClone) { this.classes = toClone.classes; if(toClone.class_correct != null) this.class_correct = Arrays.copyOf(toClone.class_correct, toClone.class_correct.length); if(toClone.total_class_weight != null) this.total_class_weight = Arrays.copyOf(toClone.total_class_weight, toClone.total_class_weight.length); } @Override public double getScore() { double score = 0; for(int i = 0; i < classes; i++) { if(total_class_weight[i] > 1e-15) score += class_correct[i]/total_class_weight[i]; else score += 1; } score /= classes; return score; } @Override public boolean lowerIsBetter() { return false; } @Override public BalancedAccuracy clone() { return new BalancedAccuracy(this); } @Override public String getName() { return "BalancedAccuracy"; } @Override public int hashCode() { return getName().hashCode(); } @Override public boolean equals(Object obj) { if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass())) { return true; } return false; } @Override public void prepare(CategoricalData toPredict) { classes = toPredict.getNumOfCategories(); total_class_weight = new double[classes]; class_correct = new double[classes]; } @Override public void addResult(CategoricalResults prediction, int trueLabel, double weight) { total_class_weight[trueLabel] += weight; if(prediction.mostLikely() == trueLabel) class_correct[trueLabel] += weight; } @Override public void addResults(ClassificationScore other) { if(other instanceof BalancedAccuracy) { BalancedAccuracy o = (BalancedAccuracy) other; for(int i = 0; i < classes; i++) { this.class_correct[i] += o.class_correct[i]; this.total_class_weight[i] += o.total_class_weight[i]; } } } }
3,497
26.761905
115
java
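Balanced accuracy is the average of the per-class recalls, which is what getScore() above computes from the weighted counts. A minimal unweighted sketch with invented labels, showing how it differs from plain accuracy on an imbalanced sample:

public class BalancedAccuracySketch
{
    public static double balancedAccuracy(int[] predicted, int[] truth, int numClasses)
    {
        double[] correct = new double[numClasses];
        double[] total = new double[numClasses];
        for (int i = 0; i < truth.length; i++)
        {
            total[truth[i]]++;
            if (predicted[i] == truth[i])
                correct[truth[i]]++;
        }
        double score = 0;
        for (int c = 0; c < numClasses; c++)
            score += total[c] > 0 ? correct[c] / total[c] : 1.0;//empty classes count as perfect, as in getScore()
        return score / numClasses;
    }

    public static void main(String[] args)
    {
        int[] truth = {0, 0, 0, 0, 1};//imbalanced: four of class 0, one of class 1
        int[] pred  = {0, 0, 0, 0, 0};//always predicts the majority class
        System.out.println(balancedAccuracy(pred, truth, 2));//0.5, while plain accuracy would report 0.8
    }
}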
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/ClassificationScore.java
package jsat.classifiers.evaluation; import java.io.Serializable; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; /** * This interface defines the contract for evaluating or "scoring" the results * on a classification problem. <br> * <br> * All classification scores must override the {@link #equals(java.lang.Object)} * and {@link #hashCode() } methods. If a score has parameters, different * objects with different parameters must not be equal. However, different * objects with the same parameters must be equal <i>even if their internal * states are different</i> * * @author Edward Raff */ public interface ClassificationScore extends Serializable { /** * Prepares this score to predict on the given input * @param toPredict the class label information that will be evaluated */ public void prepare(CategoricalData toPredict); /** * Adds the given result to the score * @param prediction the prediction for the data point * @param trueLabel the true label for the data point * @param weight the weigh to assign to the data point */ public void addResult(CategoricalResults prediction, int trueLabel, double weight); /** * The score contained in <i>this</i> object is augmented with the results * already accumulated in the {@code other} object. This does not result in * an averaging, but alters the current object to have the same score it * would have had if all the results were originally inserted into <i>this * </i> object. <br> * <br> * This method is only required to work if {@code other} if of the same * class as {@code this} object. * * @param other the object to add the results from */ public void addResults(ClassificationScore other); /** * Computes the score for the results that have been enrolled via * {@link #addResult(jsat.classifiers.CategoricalResults, int, double) } * * @return the score for the current results */ public double getScore(); /** * Returns {@code true} if a lower score is better, or {@code false} if a * higher score is better * @return {@code true} if a lower score is better */ public boolean lowerIsBetter(); @Override public boolean equals(Object obj); @Override public int hashCode(); public ClassificationScore clone(); /** * Returns the name to present for this score * @return the score name */ public String getName(); }
2,601
32.358974
87
java
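The intended call sequence for any ClassificationScore implementation appears to be prepare(...) once, addResult(...) per prediction, then getScore(). The sketch below drives the Accuracy score from above through that sequence; the surrounding class and the fake predictions are placeholders for illustration rather than JSAT code.

import jsat.classifiers.CategoricalData;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.evaluation.Accuracy;
import jsat.classifiers.evaluation.ClassificationScore;

public class ScoreUsageSketch
{
    public static void main(String[] args)
    {
        CategoricalData binary = new CategoricalData(2);
        ClassificationScore score = new Accuracy();
        score.prepare(binary);

        int[] trueLabels = {0, 1, 1, 0};
        double[] probOfClass0 = {0.9, 0.2, 0.6, 0.7};//stand-ins for a real classifier's outputs
        for (int i = 0; i < trueLabels.length; i++)
        {
            CategoricalResults cr = new CategoricalResults(2);
            cr.setProb(0, probOfClass0[i]);
            cr.setProb(1, 1 - probOfClass0[i]);
            score.addResult(cr, trueLabels[i], 1.0);//weight of 1 for every point
        }
        System.out.println(score.getName() + ": " + score.getScore());//3 of 4 correct, 0.75
    }
}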
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/F1Score.java
package jsat.classifiers.evaluation; /** * The F1 score is the harmonic mean of {@link Precision} and * {@link Recall}. This score is only valid for binary * classification problems. * * @author Edward Raff */ public class F1Score extends SimpleBinaryClassMetric { private static final long serialVersionUID = -6192302685766444921L; public F1Score() { super(); } public F1Score(F1Score toClone) { super(toClone); } @Override public double getScore() { return 2*tp/(2*tp+fp+fn); } @Override public boolean equals(Object obj) { if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass())) { return true; } return false; } @Override public int hashCode() { return getName().hashCode(); } @Override public F1Score clone() { return new F1Score(this); } @Override public String getName() { return "F1 Score"; } }
1,078
16.688525
112
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/FbetaScore.java
package jsat.classifiers.evaluation; /** * The F<sub>&beta;</sub> score is the generalization of {@link F1Score}, where * &beta; indicates the level of preference for precision over recall. * This score is only valid for binary classification problems. * * @author Edward Raff */ public class FbetaScore extends SimpleBinaryClassMetric { private static final long serialVersionUID = -7530404462591303694L; private double beta; /** * Creates a new F<sub>&beta;</sub> score * @param beta the weight to apply to precision over recall, must be in (0, * &infin;) */ public FbetaScore(double beta) { super(); if(beta <= 0 || Double.isInfinite(beta) || Double.isNaN(beta)) throw new IllegalArgumentException("beta must be in (0, inf), not " + beta); this.beta = beta; } /** * Copy constructor * @param toClone the object to copy */ public FbetaScore(FbetaScore toClone) { super(toClone); this.beta = toClone.beta; } @Override public double getScore() { final double betaSqrd = beta*beta; return (1+betaSqrd)*tp/((1+betaSqrd)*tp+fp+betaSqrd*fn); } @Override public boolean equals(Object obj) { if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass())) { return this.beta == ((FbetaScore)obj).beta; } return false; } @Override public int hashCode() { return new Double(beta).hashCode(); } @Override public FbetaScore clone() { return new FbetaScore(this); } @Override public String getName() { return "F beta(" + beta + ") Score"; } }
1,796
22.96
112
java
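getScore() above evaluates F_beta = (1 + beta^2) * tp / ((1 + beta^2) * tp + fp + beta^2 * fn). A small sketch of that closed form on raw counts, showing that beta = 1 recovers the F1 score; the counts are made up.

public class FbetaSketch
{
    /** (1 + beta^2) * tp / ((1 + beta^2) * tp + fp + beta^2 * fn) */
    public static double fBeta(double tp, double fp, double fn, double beta)
    {
        double b2 = beta * beta;
        return (1 + b2) * tp / ((1 + b2) * tp + fp + b2 * fn);
    }

    public static void main(String[] args)
    {
        double tp = 40, fp = 10, fn = 20;
        System.out.println(fBeta(tp, fp, fn, 1.0));//F1 = 2*40/(2*40+10+20), about 0.73
        System.out.println(fBeta(tp, fp, fn, 2.0));//F2 favors recall, about 0.69
        System.out.println(fBeta(tp, fp, fn, 0.5));//F0.5 favors precision, about 0.77
    }
}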
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/Kappa.java
package jsat.classifiers.evaluation; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.linear.DenseMatrix; import jsat.linear.Matrix; /** * Evaluates a classifier based on the Kappa statistic. * * @author Edward Raff */ public class Kappa implements ClassificationScore { private static final long serialVersionUID = -1684937057234736715L; private Matrix errorMatrix; public Kappa() { } public Kappa(Kappa toClone) { if(toClone.errorMatrix != null) this.errorMatrix = toClone.errorMatrix.clone(); } @Override public void addResult(CategoricalResults prediction, int trueLabel, double weight) { errorMatrix.increment(prediction.mostLikely(), trueLabel, weight); } @Override public void addResults(ClassificationScore other) { Kappa otherObj = (Kappa) other; if(otherObj.errorMatrix == null) return; if(this.errorMatrix == null) throw new RuntimeException("KappaScore has not been prepared"); this.errorMatrix.mutableAdd(otherObj.errorMatrix); } @Override public void prepare(CategoricalData toPredict) { int N = toPredict.getNumOfCategories(); errorMatrix = new DenseMatrix(N, N); } @Override public double getScore() { double[] rowTotals = new double[errorMatrix.rows()]; double[] colTotals = new double[errorMatrix.rows()]; for(int i = 0; i < errorMatrix.rows(); i++) { rowTotals[i] = errorMatrix.getRowView(i).sum(); colTotals[i] = errorMatrix.getColumnView(i).sum(); } double chanceAgreement = 0; double accuracy = 0; double totalCount = 0; for(int i = 0; i < rowTotals.length; i++) { chanceAgreement += rowTotals[i]*colTotals[i]; totalCount += rowTotals[i]; accuracy += errorMatrix.get(i, i); } chanceAgreement /= totalCount*totalCount; accuracy /= totalCount; return (accuracy-chanceAgreement)/(1-chanceAgreement); } @Override public boolean equals(Object obj) { if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass())) { return true; } return false; } @Override public int hashCode() { return getName().hashCode(); } @Override public boolean lowerIsBetter() { return false; } @Override public Kappa clone() { return new Kappa(this); } @Override public String getName() { return "Kappa"; } }
2,769
23.298246
112
java
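Kappa compares the observed accuracy against the agreement expected by chance from the row and column totals of the confusion matrix. The sketch below mirrors getScore() with plain arrays instead of JSAT's Matrix type; the confusion matrix in main is invented.

public class KappaSketch
{
    /** confusion[predicted][truth], as the errorMatrix above is filled */
    public static double kappa(double[][] confusion)
    {
        int n = confusion.length;
        double total = 0, observed = 0, chance = 0;
        double[] rowTotals = new double[n], colTotals = new double[n];
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
            {
                rowTotals[i] += confusion[i][j];
                colTotals[j] += confusion[i][j];
                total += confusion[i][j];
                if (i == j)
                    observed += confusion[i][j];
            }
        for (int i = 0; i < n; i++)
            chance += rowTotals[i] * colTotals[i];
        chance /= total * total;
        observed /= total;
        return (observed - chance) / (1 - chance);
    }

    public static void main(String[] args)
    {
        double[][] cm = {{20, 5}, {10, 15}};//70% raw accuracy
        System.out.println(kappa(cm));//0.4, the agreement left after discounting chance
    }
}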
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/LogLoss.java
package jsat.classifiers.evaluation; import java.util.Arrays; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; /** * This computes the multi-class Log Loss<br> * - 1/N <big>&Sigma;</big><sub>&forall; i &isin; N</sub> log(p<sub>i, y</sub>) * <br> * <br> * Where <i>N</i> is the number of data points and <i>p<sub>i, y</sub></i> is * the estimated probability of the true class label. The lower the loss score, * the better. * <br><br> * When <i>p<sub>i, y</sub></i> = 0 the log loss is uninformatively forced to * &infin;, even if all other data points are perfectly correct. To avoid this a * small nudge factor is added. * * @author Edward Raff */ public class LogLoss implements ClassificationScore { private static final long serialVersionUID = 3123851772991293430L; private double loss; private double weightSum; private double nudge; /** * Creates a new Log Loss evaluation score */ public LogLoss() { this(1e-15); } /** * Creates a new Log Loss evaluation score * @param nudge the nudge value to avoid zero probabilities, must be non * negative and less than 0.1 */ public LogLoss(double nudge) { if(nudge < 0 || nudge >= 0.1) throw new IllegalArgumentException("nudge must be a small non-negative value in [0, 0.1) not " + nudge); this.nudge = nudge; } public LogLoss(LogLoss toClone) { this.loss = toClone.loss; this.weightSum = toClone.weightSum; this.nudge = toClone.nudge; } @Override public void addResult(CategoricalResults prediction, int trueLabel, double weight) { loss += weight * Math.log(Math.max(prediction.getProb(trueLabel), nudge)); weightSum += weight; } @Override public void addResults(ClassificationScore other) { LogLoss otherObj = (LogLoss) other; this.loss += otherObj.loss; this.weightSum += otherObj.weightSum; } @Override public void prepare(CategoricalData toPredict) { loss = 0; weightSum = 0; } @Override public double getScore() { return -loss/weightSum; } @Override public boolean equals(Object obj) { if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass())) { return ((LogLoss)obj).nudge == this.nudge; } return false; } @Override public int hashCode() { return Arrays.hashCode(new double[]{nudge}); } @Override public boolean lowerIsBetter() { return true; } @Override public LogLoss clone() { return new LogLoss(this); } @Override public String getName() { return "Log Loss"; } }
2,887
23.066667
116
java
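The score is the negative mean log-probability assigned to the true class, with each probability clamped from below by the nudge so a single zero-probability prediction cannot force the result to infinity. A minimal sketch with invented predictions:

public class LogLossSketch
{
    public static double logLoss(double[][] predictedProbs, int[] trueLabels, double nudge)
    {
        double loss = 0;
        for (int i = 0; i < trueLabels.length; i++)
        {
            double pTrue = Math.max(predictedProbs[i][trueLabels[i]], nudge);//clamp away from zero
            loss += Math.log(pTrue);
        }
        return -loss / trueLabels.length;
    }

    public static void main(String[] args)
    {
        double[][] p = {{0.9, 0.1}, {0.2, 0.8}, {0.0, 1.0}};
        int[] y = {0, 1, 0};//the last prediction is confidently wrong
        System.out.println(logLoss(p, y, 1e-15));//dominated by the -log(1e-15) term
    }
}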
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/MatthewsCorrelationCoefficient.java
package jsat.classifiers.evaluation; /** * Evaluates a classifier based on Mathews Correlation Coefficient * * @author Edward Raff */ public class MatthewsCorrelationCoefficient extends SimpleBinaryClassMetric { private static final long serialVersionUID = 7102318546460007008L; public MatthewsCorrelationCoefficient() { super(); } public MatthewsCorrelationCoefficient(MatthewsCorrelationCoefficient toClone) { super(toClone); } @Override public double getScore() { double denom = (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn); if(denom <= 1e-16) return 0; return (tp*tn-fp*fn)/Math.sqrt(denom); } @Override public boolean equals(Object obj) { if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass())) { return true; } return false; } @Override public int hashCode() { return getName().hashCode(); } @Override public MatthewsCorrelationCoefficient clone() { return new MatthewsCorrelationCoefficient(this); } @Override public String getName() { return "Matthews Correlation Coefficient"; } }
1,276
19.596774
112
java
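A small sketch of the Matthews Correlation Coefficient formula on the four counts, including the zero-denominator guard used by getScore() above; the counts in main are made up.

public class MccSketch
{
    public static double mcc(double tp, double tn, double fp, double fn)
    {
        double denom = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn);
        if (denom <= 1e-16)
            return 0;//a degenerate confusion matrix gives no correlation
        return (tp * tn - fp * fn) / Math.sqrt(denom);
    }

    public static void main(String[] args)
    {
        System.out.println(mcc(45, 40, 10, 5));//about 0.70, a strong positive correlation
        System.out.println(mcc(25, 25, 25, 25));//0, predictions no better than chance
    }
}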
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/Precision.java
package jsat.classifiers.evaluation; /** * Evaluates a classifier based on the Precision, where the class of index 0 * is considered the positive class. This score is only valid for binary * classification problems. * * @author Edward Raff */ public class Precision extends SimpleBinaryClassMetric { private static final long serialVersionUID = 7046590252900909918L; public Precision() { super(); } public Precision(Precision toClone) { super(toClone); } @Override public double getScore() { return tp/(tp+fp); } @Override public boolean equals(Object obj) { if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass())) { return true; } return false; } @Override public int hashCode() { return getName().hashCode(); } @Override public Precision clone() { return new Precision(this); } @Override public String getName() { return "Precision"; } }
1,116
17.311475
112
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/Recall.java
package jsat.classifiers.evaluation; /** * Evaluates a classifier based on the Recall rate, where the class of index 0 * is considered the positive class. This score is only valid for binary * classification problems. * * @author Edward Raff */ public class Recall extends SimpleBinaryClassMetric { private static final long serialVersionUID = 4832185425203972017L; /** * Creates a new Recall evaluator */ public Recall() { super(); } /** * Copy constructor * @param toClone the object to copy */ public Recall(Recall toClone) { super(toClone); } @Override public double getScore() { return tp/(tp+fn); } @Override public boolean equals(Object obj) { if(this.getClass().isAssignableFrom(obj.getClass()) && obj.getClass().isAssignableFrom(this.getClass())) { return true; } return false; } @Override public int hashCode() { return getName().hashCode(); } @Override public Recall clone() { return new Recall(this); } @Override public String getName() { return "Recall"; } }
1,232
17.132353
112
java
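Precision and recall come from the same tp/fp/fn counts, with class index 0 treated as the positive class in both classes above. A tiny sketch with invented counts:

public class PrecisionRecallSketch
{
    public static void main(String[] args)
    {
        double tp = 30, fp = 10, fn = 20;
        double precision = tp / (tp + fp);//of everything predicted positive, how much was right
        double recall    = tp / (tp + fn);//of everything truly positive, how much was found
        System.out.println("precision = " + precision);//0.75
        System.out.println("recall    = " + recall);   //0.6
    }
}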
JSAT
JSAT-master/JSAT/src/jsat/classifiers/evaluation/SimpleBinaryClassMetric.java
package jsat.classifiers.evaluation; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; /** * This is a base class for scores that can be computed from simple counts of * the true positives, true negatives, false positives, and false negatives. The * class with index zero will be considered the positive class and the class * with the first index will be the negative class. <br> * <br> * By default this class assumes higher scores are better * * @author Edward Raff */ public abstract class SimpleBinaryClassMetric implements ClassificationScore { private static final long serialVersionUID = -84479984342547212L; /** * true positives */ protected double tp; /** * true negatives */ protected double tn; /** * false positives */ protected double fp; /** * false negatives */ protected double fn; public SimpleBinaryClassMetric() { } public SimpleBinaryClassMetric(SimpleBinaryClassMetric toClone) { this.tp = toClone.tp; this.tn = toClone.tn; this.fp = toClone.fp; this.fn = toClone.fn; } @Override public void addResult(CategoricalResults prediction, int trueLabel, double weight) { int pred = prediction.mostLikely(); if(pred == trueLabel) if(pred == 0) tp += weight; else tn += weight; else { if(pred == 0) fp += weight; else fn += weight; } } @Override public void prepare(CategoricalData toPredict) { tp = tn = fp = fn = 0; } @Override public void addResults(ClassificationScore other) { SimpleBinaryClassMetric otherObj = (SimpleBinaryClassMetric) other; this.tp += otherObj.tp; this.tn += otherObj.tn; this.fp += otherObj.fp; this.fn += otherObj.fn; } @Override abstract public double getScore(); @Override public boolean lowerIsBetter() { return false; } @Override abstract public SimpleBinaryClassMetric clone(); }
2,216
21.85567
86
java
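Because the base class above accumulates tp, tn, fp and fn, a new binary score only has to supply getScore(), clone(), a name, and the equals/hashCode contract. The Specificity class below is a hypothetical example, not part of JSAT, computing the true negative rate tn / (tn + fp):

import jsat.classifiers.evaluation.SimpleBinaryClassMetric;

public class Specificity extends SimpleBinaryClassMetric
{
    public Specificity()
    {
        super();
    }

    public Specificity(Specificity toClone)
    {
        super(toClone);
    }

    @Override
    public double getScore()
    {
        return tn / (tn + fp);//true negative rate
    }

    @Override
    public boolean equals(Object obj)
    {
        return obj instanceof Specificity;//no parameters, so any two instances are interchangeable
    }

    @Override
    public int hashCode()
    {
        return getName().hashCode();
    }

    @Override
    public Specificity clone()
    {
        return new Specificity(this);
    }

    @Override
    public String getName()
    {
        return "Specificity";
    }
}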
JSAT
JSAT-master/JSAT/src/jsat/classifiers/imbalance/BorderlineSMOTE.java
/* * Copyright (C) 2017 Edward Raff * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package jsat.classifiers.imbalance; import java.util.ArrayList; import java.util.List; import java.util.Random; import java.util.concurrent.ExecutorService; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.DataPointPair; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.linear.VecPaired; import jsat.linear.distancemetrics.DistanceMetric; import jsat.linear.distancemetrics.EuclideanDistance; import jsat.linear.vectorcollection.DefaultVectorCollection; import jsat.linear.vectorcollection.VectorCollection; import jsat.linear.vectorcollection.VectorCollectionUtils; import jsat.utils.IntList; import jsat.utils.ListUtils; import jsat.utils.concurrent.ParallelUtils; import jsat.utils.random.RandomUtil; /** * This class implements the Borderline extension of the {@link SMOTE} algorithm * for dealing with class imbalance. SMOTE over-samples from the minority class * at random points in the space. Borderline smote attempts to estimate which * points are on the border of the class bounder, and over-samples only from the * points on the boarder. Boarderline-SMOTE can also choose to * {@link #setMajorityInterpolation(boolean) perform interpolation using samples for the majority class}, * which can sometimes improve performance. The border is impacted by changes to * the number of {@link #setSmoteNeighbors(int) neighbors} used. In the rare * event that a boarder can't be estimated, this implementation will fall back * to standard SMOTE.<br> * This implementation extends the original SMOTE algorithm to the * multi-class case.<br> * <br> * See: Han, H., Wang, W.-Y., & Mao, B.-H. (2005). Borderline-SMOTE: A New * Over-sampling Method in Imbalanced Data Sets Learning. In Proceedings of the * 2005 International Conference on Advances in Intelligent Computing - Volume * Part I (pp. 878–887). Berlin, Heidelberg: Springer-Verlag. * <a href="http://doi.org/10.1007/11538059_91">DOI:10.1007/11538059_91</a> * @author Edward Raff */ public class BorderlineSMOTE extends SMOTE { private boolean majorityInterpolation; /** * Creates a new Borderline-SMOTE model that will over-sample the minority * classes so that there is a balanced number of data points in each class. * It will not use majority interpolation. * * @param baseClassifier the base classifier to use after the SMOTEing is * done. */ public BorderlineSMOTE(Classifier baseClassifier) { this(baseClassifier, false); } /** * Creates a new Borderline-SMOTE model that will over-sample the minority * classes so that there is a balanced number of data points in each class. * * @param baseClassifier the base classifier to use after the SMOTEing is * done. 
* @param majorityInterpolation {@code true} if synthetic examples should * use the majority class as well, or {@code false} to use only the minority * class. */ public BorderlineSMOTE(Classifier baseClassifier, boolean majorityInterpolation) { this(baseClassifier, new EuclideanDistance(), majorityInterpolation); } /** * Creates a new Borderline-SMOTE model that will over-sample the minority * classes so that there is a balanced number of data points in each class. * * @param baseClassifier the base classifier to use after the SMOTEing is * done. * @param dm the distance metric to use for determining nearest neighbors * @param majorityInterpolation {@code true} if synthetic examples should * use the majority class as well, or {@code false} to use only the minority * class. */ public BorderlineSMOTE(Classifier baseClassifier, DistanceMetric dm, boolean majorityInterpolation) { this(baseClassifier, dm, 1.0, majorityInterpolation); } /** * Creates a new Borderline-SMOTE model. * * @param baseClassifier the base classifier to use after the SMOTEing is * done. * @param dm the distance metric to use for determining nearest neighbors * @param targetRatio the desired ratio of samples for each class with respect to the majority class. * @param majorityInterpolation {@code true} if synthetic examples should * use the majority class as well, or {@code false} to use only the minority * class. */ public BorderlineSMOTE(Classifier baseClassifier, DistanceMetric dm, double targetRatio, boolean majorityInterpolation) { this(baseClassifier, dm, 5, targetRatio, majorityInterpolation); } /** * Creates a new SMOTE object * * @param baseClassifier the base classifier to use after the SMOTEing is * done. * @param dm the distance metric to use for determining nearest neighbors * @param smoteNeighbors the number of neighbors to look at when * interpolating points * @param targetRatio the desired ratio of samples for each class with * respect to the majority class. * @param majorityInterpolation {@code true} if synthetic examples should * use the majority class as well, or {@code false} to use only the minority * class. */ public BorderlineSMOTE(Classifier baseClassifier, DistanceMetric dm, int smoteNeighbors, double targetRatio, boolean majorityInterpolation) { super(baseClassifier, dm, smoteNeighbors, targetRatio); setMajorityInterpolation(majorityInterpolation); } /** * Copy constructor * @param toCopy the object to copy */ public BorderlineSMOTE(BorderlineSMOTE toCopy) { super((SMOTE)toCopy); this.majorityInterpolation = toCopy.majorityInterpolation; } /** * Sets whether the generation of synthetic samples can make use of the * majority samples (i.e., from other classes) or not. The use of majority * samples is "Borderline-SMOTE2" in the original paper. If majority samples * are not used, it is equivalent to "Borderline-SMOTE1". * * @param majorityInterpolation {@code true} if majority samples should be * used for interpolation, and {@code false} if only minority samples should * be used. */ public void setMajorityInterpolation(boolean majorityInterpolation) { this.majorityInterpolation = majorityInterpolation; } /** * * @return {@code true} if majority samples should be * used for interpolation, and {@code false} if only minority samples should * be used. 
*/ public boolean isMajorityInterpolation() { return majorityInterpolation; } @Override public void train(final ClassificationDataSet dataSet, boolean parallel) { if(dataSet.getNumCategoricalVars() != 0) throw new FailedToFitException("SMOTE only works with numeric-only feature values"); List<Vec> vAll = dataSet.getDataVectors(); IntList[] classIndex = new IntList[dataSet.getClassSize()]; for(int i = 0; i < classIndex.length; i++) classIndex[i] = new IntList(); for(int i = 0; i < dataSet.size(); i++) classIndex[dataSet.getDataPointCategory(i)].add(i); double[] priors = dataSet.getPriors(); Vec ratios = DenseVector.toDenseVec(priors).clone();//yes, make a copy - I want the priors around too! /** * How many samples does it take to reach parity with the majority class */ final int majorityNum = (int) (dataSet.size()*ratios.max()); ratios.mutableDivide(ratios.max()); final List<DataPointPair<Integer>> synthetics = new ArrayList<>(); //Put ALL the vectors intoa single VC paired with their class label VectorCollection<Vec> VC_all = new DefaultVectorCollection<>(dm, vAll, parallel); //Go through and perform oversampling of each class for(final int classID : ListUtils.range(0, dataSet.getClassSize())) { final int samplesNeeded = (int) (majorityNum * targetRatio - classIndex[classID].size()); if(samplesNeeded <= 0) continue; //collect the vectors we need to interpolate with final List<Vec> V_id = new ArrayList<>(); for(int i : classIndex[classID]) V_id.add(vAll.get(i)); VectorCollection<Vec> VC_id = new DefaultVectorCollection<>(dm, V_id, parallel); //Step 1. For every p ii =( 1,2,..., pnum) in the minority class P, //we calculate its m nearest neighbors from the whole training set T List<List<Integer>> allNeighbors = new ArrayList<>(); List<List<Double>> allDistances = new ArrayList<>(); VC_all.search(V_id, smoteNeighbors+1, allNeighbors, allDistances, parallel); /** * A list of the vectors for only the neighbors who were not members * of the same class. Used when majorityInterpolation is true */ final List<List<Vec>> otherClassSamples = new ArrayList<>(); if(majorityInterpolation) for(List<Integer> tmp : allNeighbors) otherClassSamples.add(new ArrayList<>(smoteNeighbors)); //Step 2. final IntList danger_id = new IntList(); for(int i = 0; i < VC_id.size(); i++) { int same_class = 0; List<Integer> neighors_of_i = allNeighbors.get(i); for(int j = 1; j < smoteNeighbors+1; j++) { if(classID == dataSet.getDataPointCategory(neighors_of_i.get(j))) same_class++; else { if(majorityInterpolation) otherClassSamples.get(i).add(VC_all.get(neighors_of_i.get(j))); } } //are you in the DANZER ZONE!? //ratio of how many "majority" examples vs minority //we treat any other class as the "majority" to generalize to the multi-class case //for binary, will be equivalent to original paper double sOm = 1.0-same_class/(double)smoteNeighbors; if(0.5 <= sOm && sOm < 1.0) danger_id.add(i); //else, you are either easily misclassified or easily classified - and thus skipped } //find all the nearest neighbors for each point so we know who to interpolate with List<List<Integer>> idNeighbors = new ArrayList<>(); List<List<Double>> idDistances = new ArrayList<>(); VC_id.search(VC_id, smoteNeighbors+1, idNeighbors, idDistances, parallel); ParallelUtils.run(parallel, samplesNeeded, (start, end)-> { Random rand = RandomUtil.getRandom(); List<DataPoint> local_new = new ArrayList<>(); for (int i = start; i < end; i++) { int sampleIndex; if (danger_id.isEmpty())//danger zeon was empty? 
Fall back to SMOTE style sampleIndex = i % V_id.size(); else sampleIndex = danger_id.getI(i % danger_id.size()); Vec vec_nn; //which of the neighbors should we use? //Shoulwe we interpolate withing class or outside of or class? boolean useOtherClass = rand.nextBoolean() && majorityInterpolation && !danger_id.isEmpty(); if (useOtherClass) { List<Vec> candidates = otherClassSamples.get(sampleIndex); vec_nn = candidates.get(rand.nextInt(candidates.size())); } else { int nn = rand.nextInt(smoteNeighbors) + 1;//index 0 is ourself vec_nn = VC_id.get(idNeighbors.get(sampleIndex).get(nn)); } double gap = rand.nextDouble(); if (useOtherClass) gap /= 2;//now in the range of [0, 0.5), so that the synthetic point is mostly of the minority class of interest // x ~ U(0, 1) //new = sample + x * diff //where diff = (sample - other) //equivalent to //new = sample * (x+1) + other * x Vec newVal = V_id.get(sampleIndex).clone(); newVal.mutableMultiply(gap + 1); newVal.mutableAdd(gap, vec_nn); local_new.add(new DataPoint(newVal)); } synchronized (synthetics) { for (DataPoint v : local_new) synthetics.add(new DataPointPair<>(v, classID)); } }); } ClassificationDataSet newDataSet = new ClassificationDataSet(ListUtils.mergedView(synthetics, dataSet.getAsDPPList()), dataSet.getPredicting()); baseClassifier.train(newDataSet, parallel); } @Override public BorderlineSMOTE clone() { return new BorderlineSMOTE(this); } }
14,309
41.844311
152
java
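The borderline selection in train() above flags a minority point as being in "danger" when at least half, but not all, of its m nearest neighbors belong to other classes; all-other neighbors suggest noise, and all-same neighbors suggest a safe interior point. A standalone sketch of just that rule, assuming the neighbor labels are already known:

public class DangerZoneSketch
{
    public static boolean inDanger(int minorityClass, int[] neighborLabels)
    {
        int otherClass = 0;
        for (int label : neighborLabels)
            if (label != minorityClass)
                otherClass++;
        double ratioOther = otherClass / (double) neighborLabels.length;
        return 0.5 <= ratioOther && ratioOther < 1.0;//all-other looks like noise, all-same is a safe interior point
    }

    public static void main(String[] args)
    {
        System.out.println(inDanger(1, new int[]{1, 1, 1, 1, 1}));//false: deep inside its own class
        System.out.println(inDanger(1, new int[]{0, 0, 0, 1, 1}));//true: on the border, kept for over-sampling
        System.out.println(inDanger(1, new int[]{0, 0, 0, 0, 0}));//false: probably noise
    }
}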
JSAT
JSAT-master/JSAT/src/jsat/classifiers/imbalance/SMOTE.java
/* * Copyright (C) 2017 Edward Raff * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package jsat.classifiers.imbalance; import java.util.ArrayList; import java.util.List; import java.util.Random; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.DataPointPair; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.linear.VecPaired; import jsat.linear.distancemetrics.DistanceMetric; import jsat.linear.distancemetrics.EuclideanDistance; import jsat.linear.vectorcollection.DefaultVectorCollection; import jsat.linear.vectorcollection.VectorCollection; import jsat.linear.vectorcollection.VectorCollectionUtils; import jsat.parameters.Parameter.ParameterHolder; import jsat.parameters.Parameterized; import jsat.utils.FakeExecutor; import jsat.utils.IntList; import jsat.utils.ListUtils; import jsat.utils.SystemInfo; import jsat.utils.concurrent.ParallelUtils; import jsat.utils.random.RandomUtil; /** * This class implements the Synthetic Minority Over-sampling TEchnique (SMOTE) * for dealing with class imbalance. It does this by over-sampling the minority * classes to bring their total count up to parity (or some target ratio) with * the majority class. This is done by interpolating between minority points and * their neighbors to create new synthetic points that are not present in the * current dataset. For this reason SMOTE only works with numeric feature * vectors.<br> * <br> * See: Chawla, N., Bowyer, K., Hall, L., & Kegelmeyer, P. (2002). SMOTE: * synthetic minority over-sampling technique. Artificial Intelligence Research, * 16, 321–357. Retrieved from <a href="http://arxiv.org/abs/1106.1813">here</a> * @author Edward Raff */ public class SMOTE implements Classifier, Parameterized { @ParameterHolder protected Classifier baseClassifier; protected DistanceMetric dm; protected int smoteNeighbors; protected double targetRatio; /** * Creates a new SMOTE model that will over-sample the minority classes so * that there is a balanced number of data points in each class.<br> * This implementation extends the original SMOTE algorithm to the * multi-class case. * * * @param baseClassifier the base classifier to use after the SMOTEing is * done. */ public SMOTE(Classifier baseClassifier) { this(baseClassifier, new EuclideanDistance()); } /** * Creates a new SMOTE model that will over-sample the minority classes so * that there is a balanced number of data points in each class. * * @param baseClassifier the base classifier to use after the SMOTEing is * done. 
* @param dm the distance metric to use for determining nearest neighbors */ public SMOTE(Classifier baseClassifier, DistanceMetric dm) { this(baseClassifier, dm, 1.0); } /** * Creates a new SMOTE model. * * @param baseClassifier the base classifier to use after the SMOTEing is * done. * @param dm the distance metric to use for determining nearest neighbors * @param targetRatio the desired ratio of samples for each class with respect to the majority class. */ public SMOTE(Classifier baseClassifier, DistanceMetric dm, double targetRatio) { this(baseClassifier, dm, 5, targetRatio); } /** * Creates a new SMOTE object * @param baseClassifier the base classifier to use after the SMOTEing is done. * @param dm the distance metric to use for determining nearest neighbors * @param smoteNeighbors the number of neighbors to look at when interpolating points * @param targetRatio the desired ratio of samples for each class with respect to the majority class. */ public SMOTE(Classifier baseClassifier, DistanceMetric dm, int smoteNeighbors, double targetRatio) { setBaseClassifier(baseClassifier); setDistanceMetric(dm); setSmoteNeighbors(smoteNeighbors); setTargetRatio(targetRatio); } /** * Copy constructor * @param toCopy the object to copy */ public SMOTE(SMOTE toCopy) { this.baseClassifier = toCopy.baseClassifier.clone(); this.dm = toCopy.dm.clone(); this.smoteNeighbors = toCopy.smoteNeighbors; this.targetRatio = toCopy.targetRatio; } /** * Sets the metric used to determine the nearest neighbors of each point. * @param dm the distance metric to use. */ public void setDistanceMetric(DistanceMetric dm) { this.dm = dm; } /** * * @return the distance metric to use */ public DistanceMetric getDistanceMetric() { return dm; } /** * Sets the number of neighbors that will be used to be candidates for * interpolation. The default value recommended in the original paper is 5. * * @param smoteNeighbors the number of candidate neighbors to select from * when creating synthetic data points. */ public void setSmoteNeighbors(int smoteNeighbors) { if(smoteNeighbors < 1) throw new IllegalArgumentException("number of neighbors considered must be a positive value"); this.smoteNeighbors = smoteNeighbors; } /** * * @return the number of candidate neighbors to select from * when creating synthetic data points. */ public int getSmoteNeighbors() { return smoteNeighbors; } /** * Sets the desired ratio of samples for each class compared to the majority * class. A ratio of 1.0 will oversample the minority classes until they * have just as many data points as the majority class. If any minority * class already exists at a ratio equal to or above this ratio, no over * samples will be created for that class. If the target ratio is greater * than one, all classes <i>including the majority class</i> will be * over-sampled to the desired ratio. 
* * @param targetRatio the target ratio between each class and the majority * class */ public void setTargetRatio(double targetRatio) { this.targetRatio = targetRatio; } /** * * @return the target ratio between each class and the majority * class */ public double getTargetRatio() { return targetRatio; } /** * Sets the classifier to use after the dataset has been modified * @param baseClassifier the classifier to use for training and prediction */ public void setBaseClassifier(Classifier baseClassifier) { this.baseClassifier = baseClassifier; } /** * * @return the classifier used by the model */ public Classifier getBaseClassifier() { return baseClassifier; } @Override public CategoricalResults classify(DataPoint data) { return baseClassifier.classify(data); } @Override public void train(final ClassificationDataSet dataSet, boolean parallel) { if(dataSet.getNumCategoricalVars() != 0) throw new FailedToFitException("SMOTE only works with numeric-only feature values"); List<Vec> vAll = dataSet.getDataVectors(); IntList[] classIndex = new IntList[dataSet.getClassSize()]; for(int i = 0; i < classIndex.length; i++) classIndex[i] = new IntList(); for(int i = 0; i < dataSet.size(); i++) classIndex[dataSet.getDataPointCategory(i)].add(i); double[] priors = dataSet.getPriors(); Vec ratios = DenseVector.toDenseVec(priors).clone();//yes, make a copy - I want the priors around too! /** * How many samples does it take to reach parity with the majority class */ final int majorityNum = (int) (dataSet.size()*ratios.max()); ratios.mutableDivide(ratios.max()); final List<DataPointPair<Integer>> synthetics = new ArrayList<>(); //Go through and perform oversampling of each class for(final int classID : ListUtils.range(0, dataSet.getClassSize())) { final int samplesNeeded = (int) (majorityNum * targetRatio - classIndex[classID].size()); if(samplesNeeded <= 0) continue; //collect the vectors we need to interpolate with final List<Vec> V_id = new ArrayList<>(); for(int i : classIndex[classID]) V_id.add(vAll.get(i)); VectorCollection<Vec> VC_id = new DefaultVectorCollection<>(dm, V_id, parallel); //find all the nearest neighbors for each point so we know who to interpolate with List<List<Integer>> neighbors = new ArrayList<>(); List<List<Double>> distances = new ArrayList<>(); VC_id.search(VC_id, smoteNeighbors+1, neighbors, distances, parallel); ParallelUtils.run(parallel, samplesNeeded, (start, end)-> { Random rand = RandomUtil.getRandom(); List<DataPoint> local_new = new ArrayList<>(); for (int i = start; i < end; i++) { int sampleIndex = i % V_id.size(); //which of the neighbors should we use? 
int nn = rand.nextInt(smoteNeighbors) + 1;//index 0 is ourselve Vec vec_nn = VC_id.get(neighbors.get(sampleIndex).get(nn)); double gap = rand.nextDouble(); // x ~ U(0, 1) //new = sample + x * diff //where diff = (sample - other) //equivalent to //new = sample * (x+1) + other * x Vec newVal = V_id.get(sampleIndex).clone(); newVal.mutableMultiply(gap + 1); newVal.mutableAdd(gap, vec_nn); local_new.add(new DataPoint(newVal)); } synchronized (synthetics) { for (DataPoint v : local_new) synthetics.add(new DataPointPair<>(v, classID)); } }); } ClassificationDataSet newDataSet = new ClassificationDataSet(ListUtils.mergedView(synthetics, dataSet.getAsDPPList()), dataSet.getPredicting()); baseClassifier.train(newDataSet, parallel); } @Override public void train(ClassificationDataSet dataSet) { train(dataSet, false); } @Override public boolean supportsWeightedData() { return false; } @Override public SMOTE clone() { return new SMOTE(this); } }
11,627
34.559633
152
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/knn/DANN.java
package jsat.classifiers.knn; import static java.lang.Math.*; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import jsat.DataSet; import jsat.classifiers.*; import jsat.distributions.Distribution; import jsat.distributions.discrete.UniformDiscrete; import jsat.linear.*; import jsat.linear.distancemetrics.EuclideanDistance; import jsat.linear.distancemetrics.MahalanobisDistance; import jsat.linear.vectorcollection.DefaultVectorCollection; import jsat.linear.vectorcollection.VectorCollection; import jsat.parameters.Parameterized; import jsat.utils.BoundedSortedList; /** * DANN is an implementation of Discriminant Adaptive Nearest Neighbor. DANN has * a fixed <i>O(n)</i> classification time. At each classification, DANN uses a * large set of points to iteratively create and adjust a distance metic that * reflects the separability of classes at a localized level. This increases the * work considerably over a normal {@link NearestNeighbour} classifier. The * localized metric is similar to the {@link MahalanobisDistance} * <br> * Because DANN builds its own metric, it is not possible to provide one. The * {@link VectorCollectionFactory} allowed in the constructor is to accelerate * the first convergence step. In homogeneous areas of the data set, queries can * be answered in <i>O(log n)</i> if the vector collection supports it. * <br><br> * See: Hastie, T.,&amp;Tibshirani, R. (1996). <i>Discriminant adaptive nearest * neighbor classification</i>. IEEE Transactions on Pattern Analysis and * Machine Intelligence, 18(6), 607–616. doi:10.1109/34.506411 * @author Edward Raff */ public class DANN implements Classifier, Parameterized { private static final long serialVersionUID = -272865942127664672L; /** * The default number of neighbors to use when building a metric is * {@value #DEFAULT_KN}. 
*/ public static final int DEFAULT_KN = 40; /** * The default number of neighbors to use when classifying is * {@value #DEFAULT_K} */ public static final int DEFAULT_K = 1; /** * The default regularization used when building a metric is * {@value #DEFAULT_EPS} */ public static final double DEFAULT_EPS = 1.0; /** * The default number of iterations for creating the metric is * {@value #DEFAULT_ITERATIONS} */ public static final int DEFAULT_ITERATIONS = 1; private int kn; private int k; private int maxIterations; private double eps; private CategoricalData predicting; /** * Vectors paired with their index in the original data set */ private VectorCollection<VecPaired<Vec, Integer>> vc; private List<VecPaired<Vec, Integer>> vecList; /** * Creates a new DANN classifier */ public DANN() { this(DEFAULT_KN, DEFAULT_K); } /** * Creates a new DANN classifier * @param kn the number of neighbors to use in casting a net to build a new metric * @param k the number of neighbors to use with the final metric in classification */ public DANN(int kn, int k) { this(kn, k, DEFAULT_EPS); } /** * Creates a new DANN classifier * @param kn the number of neighbors to use in casting a net to build a new metric * @param k the number of neighbors to use with the final metric in classification * @param eps the regularization to add to the metric creation */ public DANN(int kn, int k, double eps) { this(kn, k, eps, new DefaultVectorCollection<VecPaired<Vec, Integer>>()); } /** * Creates a new DANN classifier * @param kn the number of neighbors to use in casting a net to build a new metric * @param k the number of neighbors to use with the final metric in classification * @param eps the regularization to add to the metric creation * @param vcf the default vector collection that will be used for initial * neighbor search */ public DANN(int kn, int k, double eps, VectorCollection<VecPaired<Vec, Integer>> vcf) { this(kn, k, eps, DEFAULT_ITERATIONS, vcf); } /** * Creates a new DANN classifier * @param kn the number of neighbors to use in casting a net to build a new metric * @param k the number of neighbors to use with the final metric in classification * @param eps the regularization to add to the metric creation * @param maxIterations the maximum number of times to adjust the metric for * each classification * @param vcf the default vector collection that will be used for initial * neighbor search */ public DANN(int kn, int k, double eps, int maxIterations, VectorCollection<VecPaired<Vec, Integer>> vcf) { setK(k); setKn(kn); setEpsilon(eps); setMaxIterations(maxIterations); this.vc = vcf; } /** * Sets the number of nearest neighbors to use when predicting * @param k the number of neighbors */ public void setK(int k) { if(k < 1) throw new ArithmeticException("Number of neighbors must be positive"); this.k = k; } /** * Returns the number of nearest neighbors to use when predicting * @return the number of neighbors used for classification */ public int getK() { return k; } /** * Sets the number of nearest neighbors to use when adapting the distance * metric. At each iteration of the algorithm, a new distance metric will be * created. A larger number of neighbors is used to create a net of points, * around which the metric will be adapted. 
* * @param kn the number of neighbors to use */ public void setKn(int kn) { if(kn < 2) throw new ArithmeticException("At least 2 neighbors are needed to adapat the metric"); this.kn = kn; } /** * Returns the number of nearest neighbors to use when adapting the distance * metric * @return the number of neighbors used to adapt the metric */ public int getKn() { return kn; } /** * Sets the number of times a new distance metric will be created for each * query. The metric should converge quickly. For this reason, and do to a * lack of performance difference, it is highly recommended to use the * default of 1 iteration. * * @param maxIterations the maximum number of times the metric will be updated */ public void setMaxIterations(int maxIterations) { if(maxIterations < 1) throw new RuntimeException("At least one iteration must be performed"); this.maxIterations = maxIterations; } /** * Returns the number of times the distance metric will be updated. * @return the number of iterations the metric will be updated */ public int getMaxIterations() { return maxIterations; } /** * Sets the regularization to apply the the diagonal of the scatter matrix * when creating each new metric. * * @param eps the regularization value */ public void setEpsilon(double eps) { if(eps < 0 || Double.isInfinite(eps) || Double.isNaN(eps)) throw new ArithmeticException("Regularization must be a positive value"); this.eps = eps; } /** * Returns the regularization parameter that is applied to the diagonal of * the matrix when creating each new metric. * @return the regularization used. */ public double getEpsilon() { return eps; } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(predicting.getNumOfCategories()); int n = data.numNumericalValues(); Matrix sigma = Matrix.eye(n); Matrix B = new DenseMatrix(n, n); Matrix W = new DenseMatrix(n, n); Vec query = data.getNumericalValues(); Vec scratch0 = new DenseVector(n); //TODO threadlocal DoubleList / DenseVec might be better in practice for memory use double[] weights = new double[kn]; double[] priors = new double[predicting.getNumOfCategories()]; int[] classCount = new int[priors.length]; double sumOfWeights; Vec mean = new DenseVector(sigma.rows()); Vec[] classMeans = new Vec[predicting.getNumOfCategories()]; for(int i = 0; i < classMeans.length; i++) classMeans[i] = new DenseVector(mean.length()); for(int iter = 0; iter < maxIterations; iter++) { //Zero out prev iter mean.zeroOut(); Arrays.fill(priors, 0); Arrays.fill(classCount, 0); for(int i = 0; i < classMeans.length; i++) classMeans[i].zeroOut(); sumOfWeights = 0; B.zeroOut(); W.zeroOut(); List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> vecs = (iter == 0) ? vc.search(query, kn) : brute(query, sigma, kn); //Compute vector mean and weight sums, class weight sums, and the class means double h = vecs.get(vecs.size()-1).getPair(); for(int i = 0; i < vecs.size(); i++) { VecPaired<? 
extends VecPaired<Vec, Integer>, Double> vec = vecs.get(i); //vecs contains the distances, we need the distance squared weights[i] = pow(pow(1-pow(vec.getPair(), 2) /h, 3), 3); sumOfWeights += weights[i]; mean.mutableAdd(vec); int j = vec.getVector().getPair(); priors[j] += weights[i]; classMeans[j].mutableAdd(vec); classCount[j]++; } //Final divide for means and priors mean.mutableDivide(kn); for(int i = 0; i < classMeans.length; i++) { if(classCount[i] != 0.0) classMeans[i].mutableDivide(classCount[i]); priors[i] /= sumOfWeights; } //Compute B & W for(int j = 0; j < classMeans.length; j++) { //One line for B's work if(priors[j] > 0) { classMeans[j].copyTo(scratch0); scratch0.mutableSubtract(mean); Matrix.OuterProductUpdate(B, scratch0, scratch0, priors[j]); //Loop for W's work for(int i = 0; i < vecs.size(); i++) { VecPaired<? extends VecPaired<Vec, Integer>, Double> x = vecs.get(i); if(x.getVector().getPair() == j) { x.copyTo(scratch0); scratch0.mutableSubtract(classMeans[j]); Matrix.OuterProductUpdate(W, scratch0, scratch0, weights[i]); } } } } //Final divide for W W.mutableMultiply(1.0/sumOfWeights); RowColumnOps.addDiag(B, 0, B.rows(), eps); //Check, if there is a prior of 1, nothing will ever be updated. //Might as well return for(int i = 0; i < priors.length; i++) if(priors[i] == 1.0) { cr.setProb(i, 1.0); return cr; } EigenValueDecomposition evd = new EigenValueDecomposition(W); Matrix D = evd.getD(); for(int i = 0; i < D.rows(); i++) D.set(i, i, pow(D.get(i, i), -0.5)); Matrix VT =evd.getVT(); Matrix WW = VT.transposeMultiply(D).multiply(VT); sigma.zeroOut(); WW.multiply(B).multiply(WW, sigma); } List<? extends VecPaired<? extends VecPaired<Vec, Integer>, Double>> knn = brute(query, sigma, k); for(VecPaired<? extends VecPaired<Vec, Integer>, Double> nn : knn) cr.incProb(nn.getVector().getPair(), 1.0); cr.normalize(); return cr; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { predicting = dataSet.getPredicting(); vecList = new ArrayList<>(dataSet.size()); for(int i = 0; i < dataSet.size(); i++) vecList.add(new VecPaired<>(dataSet.getDataPoint(i).getNumericalValues(), dataSet.getDataPointCategory(i))); vc.build(parallel, vecList, new EuclideanDistance()); } @Override public boolean supportsWeightedData() { return false; } @Override public Classifier clone() { DANN clone = new DANN(kn, k, maxIterations, vc.clone()); if(this.predicting != null) clone.predicting = this.predicting.clone(); if(this.vc != null) clone.vc = this.vc.clone(); if(this.vecList != null) clone.vecList = new ArrayList<>(this.vecList); return clone; } private double dist(Matrix sigma, Vec query, Vec mean, Vec scratch0, Vec scartch1) { query.copyTo(scratch0); scratch0.mutableSubtract(mean); scartch1.zeroOut(); sigma.multiply(scratch0, 1.0, scartch1); return scratch0.dot(scartch1); } private List<? 
extends VecPaired<VecPaired<Vec, Integer>, Double>> brute(Vec query, Matrix sigma, int num) { Vec scartch0 = new DenseVector(query.length()); Vec scartch1 = new DenseVector(query.length()); BoundedSortedList<VecPairedComparable<VecPaired<Vec, Integer>, Double>> knn = new BoundedSortedList<>(num, num); for(VecPaired<Vec, Integer> v : vecList) { double d = dist(sigma, query, v, scartch0, scartch1); knn.add(new VecPairedComparable<>(v, d)); } return (List<VecPaired<VecPaired<Vec, Integer>, Double>>) (Object) knn; } /** * Guesses the distribution to use for the number of neighbors to consider * * @param d the dataset to get the guess for * @return the guess for the K parameter */ public static Distribution guessK(DataSet d) { return new UniformDiscrete(1, 25); } /** * Guesses the distribution to use for the number of neighbors to consider * * @param d the dataset to get the guess for * @return the guess for the Kn parameter */ public static Distribution guessKn(DataSet d) { return new UniformDiscrete(40, Math.max(d.size()/5, 50)); } }
15,096
33.626147
120
java
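Once DANN has built a local metric Sigma for a query, neighbors are ranked by the quadratic form d(x, q) = (x - q)^T Sigma (x - q), which is what the private dist(...) method evaluates. A plain-array sketch of that evaluation, with hypothetical Sigma matrices in place of the adapted one:

public class LocalMetricSketch
{
    /** Evaluates d(x, q) = (x - q)^T Sigma (x - q) for a given local metric Sigma. */
    public static double quadraticDistance(double[] x, double[] query, double[][] sigma)
    {
        int n = x.length;
        double[] diff = new double[n];
        for (int i = 0; i < n; i++)
            diff[i] = x[i] - query[i];

        double dist = 0;
        for (int i = 0; i < n; i++)//diff^T * Sigma * diff
            for (int j = 0; j < n; j++)
                dist += diff[i] * sigma[i][j] * diff[j];
        return dist;
    }

    public static void main(String[] args)
    {
        double[][] identity  = {{1, 0}, {0, 1}};   //reduces to the squared Euclidean distance
        double[][] stretched = {{4, 0}, {0, 0.25}};//hypothetical locally adapted metric
        double[] q = {0, 0}, x = {1, 1};
        System.out.println(quadraticDistance(x, q, identity)); //2.0
        System.out.println(quadraticDistance(x, q, stretched));//4.25: differences on the first axis matter more
    }
}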
JSAT
JSAT-master/JSAT/src/jsat/classifiers/knn/LWL.java
package jsat.classifiers.knn; import java.util.*; import jsat.DataSet; import jsat.classifiers.*; import jsat.classifiers.bayesian.NaiveBayesUpdateable; import jsat.distributions.Distribution; import jsat.distributions.discrete.UniformDiscrete; import jsat.distributions.empirical.kernelfunc.*; import jsat.exceptions.UntrainedModelException; import jsat.linear.Vec; import jsat.linear.VecPaired; import jsat.linear.distancemetrics.DistanceMetric; import jsat.linear.distancemetrics.TrainableDistanceMetric; import jsat.linear.vectorcollection.*; import jsat.parameters.*; import jsat.regression.*; /** * Locally Weighted Learner (LW) is the combined generalized implementation of * Locally Weighted Regression (LWR) and Locally Weighted Naive Bayes (LWNB). * The concept is simple, prediction begins in a normal NN style. Instead of * returning the prediction result as the average / majority of the found * neighbors, a classifier is trained to represent the local area that is * represented by the neighbors. The prediction result is then based on this * locally trained model. <br> * Because NN search is already slow, and increasing the search size increases * the search time, it is recommended to use moderate sized values of <i>k</i> * with simple models such as {@link NaiveBayesUpdateable NaiveBayes} and * {@link MultipleLinearRegression LinearRegression}. <br> * If the learning algorithm used does not support weighted data points, it will * be as if the {@link UniformKF uniform kernel fucntion} was used, regardless * of whatever kernel function was set. <br> * <br>See:<br> * <ul> * <li>Atkeson, C., Moore, A.,&amp;Schaal, S. (1997). * <a href="http://www.springerlink.com/index/G8280541763Q0223.pdf">Locally * Weighted Learning</a>. Artificial intelligence review, 11–73.</li> * <li>Frank, E., Hall, M.,&amp;Pfahringer, B. (2003). * <a href="http://dl.acm.org/citation.cfm?id=2100614">Locally Weighted Naive * Bayes</a>. Proceedings of the Conference on Uncertainty in Artificial * Intelligence (pp. 249–256). 
Morgan Kaufmann.</li> * </ul> * @author Edward Raff */ public class LWL implements Classifier, Regressor, Parameterized { private static final long serialVersionUID = 6942465758987345997L; private CategoricalData predicting; private Classifier classifier; private Regressor regressor; private int k; private DistanceMetric dm; private KernelFunction kf; private VectorCollection<VecPaired<Vec, Double>> vc; /** * Copy constructor * @param toCopy the version to copy */ private LWL(LWL toCopy) { if(toCopy.predicting != null) this.predicting = toCopy.predicting.clone(); if(toCopy.classifier != null) setClassifier(toCopy.classifier); if(toCopy.regressor != null) setRegressor(toCopy.regressor); setNeighbors(toCopy.k); setDistanceMetric(toCopy.dm.clone()); setKernelFunction(toCopy.kf); if(toCopy.vc != null) this.vc = toCopy.vc.clone(); } /** * Creates a new LWL classifier * @param classifier the local classifier to * @param k the number of neighbors to create a local classifier from * @param dm the metric to use when selecting the nearest points to a query */ public LWL(Classifier classifier, int k, DistanceMetric dm) { this(classifier, k, dm, EpanechnikovKF.getInstance()); } /** * Creates a new LWL classifier * @param classifier the local classifier to * @param k the number of neighbors to create a local classifier from * @param dm the metric to use when selecting the nearest points to a query * @param kf the kernel function used to weight the local points */ public LWL(Classifier classifier, int k, DistanceMetric dm, KernelFunction kf) { this(classifier, k, dm, kf, new DefaultVectorCollection<VecPaired<Vec, Double>>()); } /** * Creates a new LWL classifier * @param classifier the local classifier to * @param k the number of neighbors to create a local classifier from * @param dm the metric to use when selecting the nearest points to a query * @param kf the kernel function used to weight the local points * @param vcf the factory to create vector collections for storing the points */ public LWL(Classifier classifier, int k, DistanceMetric dm, KernelFunction kf, VectorCollection<VecPaired<Vec, Double>> vcf) { setClassifier(classifier); setNeighbors(k); setDistanceMetric(dm); setKernelFunction(kf); this.vc = vcf; } /** * Creates a new LWL Regressor * @param regressor the local regressor * @param k the number of neighbors to create a local classifier from * @param dm the metric to use when selecting the nearest points to a query */ public LWL(Regressor regressor, int k, DistanceMetric dm) { this(regressor, k, dm, EpanechnikovKF.getInstance()); } /** * Creates a new LWL Regressor * @param regressor the local regressor * @param k the number of neighbors to create a local classifier from * @param dm the metric to use when selecting the nearest points to a query * @param kf the kernel function used to weight the local points */ public LWL(Regressor regressor, int k, DistanceMetric dm, KernelFunction kf) { this(regressor, k, dm, kf, new DefaultVectorCollection<VecPaired<Vec, Double>>()); } /** * Creates a new LWL Regressor * @param regressor the local regressor * @param k the number of neighbors to create a local classifier from * @param dm the metric to use when selecting the nearest points to a query * @param kf the kernel function used to weight the local points * @param vcf the factory to create vector collections for storing the points */ public LWL(Regressor regressor, int k, DistanceMetric dm, KernelFunction kf, VectorCollection<VecPaired<Vec, Double>> vcf) { setRegressor(regressor); setNeighbors(k); 
setDistanceMetric(dm); setKernelFunction(kf); this.vc = vcf; } @Override public CategoricalResults classify(DataPoint data) { if(classifier == null || vc == null) throw new UntrainedModelException("Model has not been trained"); List<? extends VecPaired<VecPaired<Vec, Double>, Double>> knn = vc.search(data.getNumericalValues(), k); ClassificationDataSet localSet = new ClassificationDataSet(knn.get(0).length(), new CategoricalData[0], predicting); double maxD = knn.get(knn.size()-1).getPair(); for(int i = 0; i < knn.size(); i++) { VecPaired<VecPaired<Vec, Double>, Double> v = knn.get(i); localSet.addDataPoint(v, v.getVector().getPair().intValue(), kf.k(v.getPair()/maxD)); } Classifier localClassifier = classifier.clone(); localClassifier.train(localSet); return localClassifier.classify(data); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { List<VecPaired<Vec, Double>> trainList = getVecList(dataSet); TrainableDistanceMetric.trainIfNeeded(dm, dataSet, parallel); vc.build(parallel, trainList, dm); predicting = dataSet.getPredicting(); } @Override public boolean supportsWeightedData() { return false; } @Override public double regress(DataPoint data) { if(regressor == null || vc == null) throw new UntrainedModelException("Model has not been trained"); List<? extends VecPaired<VecPaired<Vec, Double>, Double>> knn = vc.search(data.getNumericalValues(), k); RegressionDataSet localSet = new RegressionDataSet(knn.get(0).length(), new CategoricalData[0]); double maxD = knn.get(knn.size()-1).getPair(); for(int i = 0; i < knn.size(); i++) { VecPaired<VecPaired<Vec, Double>, Double> v = knn.get(i); localSet.addDataPoint(v, v.getVector().getPair()); localSet.setWeight(i, kf.k(v.getPair()/maxD)); } Regressor localRegressor = regressor.clone(); localRegressor.train(localSet); return localRegressor.regress(data); } @Override public void train(RegressionDataSet dataSet, boolean parallel) { List<VecPaired<Vec, Double>> trainList = getVecList(dataSet); TrainableDistanceMetric.trainIfNeeded(dm, dataSet, parallel); vc.build(parallel, trainList, dm); } @Override public LWL clone() { return new LWL(this); } private List<VecPaired<Vec, Double>> getVecList(ClassificationDataSet dataSet) { List<VecPaired<Vec, Double>> trainList = new ArrayList<>(dataSet.size()); for(int i = 0; i < dataSet.size(); i++) trainList.add(new VecPaired<>( dataSet.getDataPoint(i).getNumericalValues(), new Double(dataSet.getDataPointCategory(i)))); return trainList; } private List<VecPaired<Vec, Double>> getVecList(RegressionDataSet dataSet) { List<VecPaired<Vec, Double>> trainList = new ArrayList<>(dataSet.size()); for(int i = 0; i < dataSet.size(); i++) trainList.add(new VecPaired<>( dataSet.getDataPoint(i).getNumericalValues(), dataSet.getTargetValue(i))); return trainList; } private void setClassifier(Classifier classifier) { this.classifier = classifier; if(classifier instanceof Regressor) this.regressor = (Regressor) classifier; } private void setRegressor(Regressor regressor) { this.regressor = regressor; if(regressor instanceof Classifier) this.classifier = (Classifier)regressor; } /** * Sets the number of neighbors that will be used to create the local model * @param k the number of neighbors to obtain */ public void setNeighbors(int k) { if(k <= 1) throw new RuntimeException("An average requires at least 2 neighbors to be taken into account"); this.k = k; } /** * Returns the number of neighbors that will be used to create each local model * @return the number of neighbors that will be used */ public 
int getNeighbors() { return k; } /** * Sets the distance metric that will be used for the nearest neighbor search * @param dm the distance metric to use for nearest neighbor search */ public void setDistanceMetric(DistanceMetric dm) { this.dm = dm; } /** * Returns the distance metric in use * @return the distance metric in use */ public DistanceMetric getDistanceMetric() { return dm; } /** * Sets the kernel function that will be used to set the weights of each * data point in the local set * @param kf the kernel function to use for weighting */ public void setKernelFunction(KernelFunction kf) { this.kf = kf; } /** * Returns the kernel function that will be used to set the weights. * @return the kernel function that will be used to set the weights */ public KernelFunction getKernelFunction() { return kf; } /** * Guesses the distribution to use for the number of neighbors to consider * * @param d the dataset to get the guess for * @return the guess for the Neighbors parameter */ public static Distribution guessNeighbors(DataSet d) { return new UniformDiscrete(25, Math.min(200, d.size()/5)); } }
12,038
34.099125
128
java
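Following the LWL source above, a short sketch of local learning with a NaiveBayesUpdateable base model, the LWL(Classifier, int, DistanceMetric) constructor, and the default Epanechnikov kernel. The toy data are hypothetical, and the data-construction helpers (CategoricalData(int), addDataPoint, toDenseVec, DataPoint(Vec)) are assumed from the wider library since they are not all shown in this excerpt.

import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.bayesian.NaiveBayesUpdateable;
import jsat.classifiers.knn.LWL;
import jsat.linear.DenseVector;
import jsat.linear.distancemetrics.EuclideanDistance;

public class LWLSketch
{
    public static void main(String[] args)
    {
        //Hypothetical toy data: two blobs in 2 dimensions
        ClassificationDataSet train = new ClassificationDataSet(2, new CategoricalData[0], new CategoricalData(2));
        for (int i = 0; i < 20; i++)
        {
            train.addDataPoint(DenseVector.toDenseVec(Math.random() * 0.4, Math.random() * 0.4), 0);
            train.addDataPoint(DenseVector.toDenseVec(0.6 + Math.random() * 0.4, 0.6 + Math.random() * 0.4), 1);
        }

        //A local Naive Bayes model is fit to the 15 nearest neighbors of each query,
        //weighted by the default Epanechnikov kernel
        LWL lwl = new LWL(new NaiveBayesUpdateable(), 15, new EuclideanDistance());
        lwl.train(train, false);

        DataPoint query = new DataPoint(DenseVector.toDenseVec(0.8, 0.7));
        System.out.println("P(class 1) = " + lwl.classify(query).getProb(1));
    }
}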
JSAT
JSAT-master/JSAT/src/jsat/classifiers/knn/NearestNeighbour.java
package jsat.classifiers.knn; import java.util.*; import jsat.DataSet; import jsat.classifiers.*; import jsat.distributions.Distribution; import jsat.distributions.discrete.UniformDiscrete; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.Vec; import jsat.linear.VecPaired; import jsat.linear.distancemetrics.*; import jsat.linear.vectorcollection.*; import jsat.math.MathTricks; import jsat.parameters.*; import jsat.regression.RegressionDataSet; import jsat.regression.Regressor; /** * An implementation of the Nearest Neighbor algorithm, but with a * British spelling! How fancy. * @author Edward Raff */ public class NearestNeighbour implements Classifier, Regressor, Parameterized { private static final long serialVersionUID = 4239569189624285932L; private int k; private boolean weighted; private DistanceMetric distanceMetric; private CategoricalData predicting; private VectorCollection<VecPaired<Vec, Double>> vecCollection; /** * Returns the number of neighbors currently consulted to make decisions * @return the number of neighbors */ public int getNeighbors() { return k; } /** * Sets the number of neighbors to consult when making decisions * @param k the number of neighbors to use */ public void setNeighbors(int k) { if(k < 1) throw new ArithmeticException("Must be a positive number of neighbors"); this.k = k; } public int getNeighbors(int k) { return k; } public DistanceMetric getDistanceMetric() { return distanceMetric; } public void setDistanceMetric(DistanceMetric distanceMetric) { if(distanceMetric == null) throw new NullPointerException("given metric was null"); this.distanceMetric = distanceMetric; } private enum Mode {REGRESSION, CLASSIFICATION}; /** * If we are in classification mode, the double is an integer that indicates class. */ Mode mode; /** * Constructs a new Nearest Neighbor Classifier * @param k the number of neighbors to use */ public NearestNeighbour(int k) { this(k, false); } /** * Constructs a new Nearest Neighbor Classifier * @param k the number of neighbors to use * @param vcf the vector collection factory to use for storing and querying */ public NearestNeighbour(int k, VectorCollection<VecPaired<Vec, Double>> vcf) { this(k, false, new EuclideanDistance(), vcf); } /** * Constructs a new Nearest Neighbor Classifier * @param k the number of neighbors to use * @param weighted whether or not to weight the influence of neighbors by their distance */ public NearestNeighbour(int k, boolean weighted) { this(k, weighted, new EuclideanDistance()); } /** * Constructs a new Nearest Neighbor Classifier * @param k the number of neighbors to use * @param weighted whether or not to weight the influence of neighbors by their distance * @param distanceMetric the method of computing distance between two vectors. */ public NearestNeighbour(int k, boolean weighted, DistanceMetric distanceMetric ) { this(k, weighted, distanceMetric, new DefaultVectorCollection<VecPaired<Vec, Double>>()); } /** * Constructs a new Nearest Neighbor Classifier * @param k the number of neighbors to use * @param weighted whether or not to weight the influence of neighbors by their distance * @param distanceMetric the method of computing distance between two vectors. 
* @param vcf the vector collection factory to use for storing and querying */ public NearestNeighbour(int k, boolean weighted, DistanceMetric distanceMetric, VectorCollection<VecPaired<Vec, Double>> vcf ) { this.mode = null; this.vecCollection = vcf; this.k = k; this.weighted = weighted; this.distanceMetric = distanceMetric; this.vecCollection.setDistanceMetric(distanceMetric); } @Override public CategoricalResults classify(DataPoint data) { if(vecCollection == null || mode != Mode.CLASSIFICATION) throw new UntrainedModelException("Classifier has not been trained for classification"); Vec query = data.getNumericalValues(); List<? extends VecPaired<VecPaired<Vec, Double>, Double>> knns = vecCollection.search(query, k); if(weighted) { double[] dists = new double[knns.size()]; for(int i = 0; i < knns.size(); i++) dists[i] = knns.get(i).getPair(); //we want something like 1/dist to make closer values weighted higher double offset = MathTricks.min(dists)*0.1+1e-15; for(int i = 0; i < knns.size(); i++) dists[i] = 1/(offset+dists[i]); MathTricks.softmax(dists, false); CategoricalResults results = new CategoricalResults(predicting.getNumOfCategories()); for(int i = 0; i < knns.size(); i++) { VecPaired<Vec, Double> pm = knns.get(i).getVector(); int index = (int) Math.round(pm.getPair()); results.setProb(index, results.getProb(index) + dists[i]);//all weights are 1 } return results; } else { CategoricalResults results = new CategoricalResults(predicting.getNumOfCategories()); for(int i = 0; i < knns.size(); i++) { VecPaired<Vec, Double> pm = knns.get(i).getVector(); int index = (int) Math.round(pm.getPair()); results.setProb(index, results.getProb(index) + 1.0);//all weights are 1 } results.normalize(); return results; } } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { if(dataSet.getNumCategoricalVars() != 0) throw new FailedToFitException("KNN requires vector data only"); mode = Mode.CLASSIFICATION; this.predicting = dataSet.getPredicting(); List<VecPaired<Vec, Double>> dataPoints = new ArrayList<VecPaired<Vec, Double>>(dataSet.size()); //Add all the data points for(int i = 0; i < dataSet.getClassSize(); i++) { for(DataPoint dp : dataSet.getSamples(i)) { //We want to include the category in this case, so we will add it to the vector dataPoints.add(new VecPaired(dp.getNumericalValues(), (double)i));//bug? why isnt this auto boxed to double w/o a cast? } } TrainableDistanceMetric.trainIfNeeded(distanceMetric, dataSet, parallel); vecCollection.build(parallel, dataPoints, distanceMetric); } @Override public double regress(DataPoint data) { if(vecCollection == null || mode != Mode.REGRESSION) throw new UntrainedModelException("Classifier has not been trained for regression"); Vec query = data.getNumericalValues(); List<? 
extends VecPaired<VecPaired<Vec, Double>, Double>> knns = vecCollection.search(query, k); double result = 0, weightSum = 0; for(int i = 0; i < knns.size(); i++) { double distance = knns.get(i).getPair(); VecPaired<Vec, Double> pm = knns.get(i).getVector(); double value = pm.getPair(); if(weighted) { distance = Math.max(1e-8, distance);//Avoiding zero distances which will result in Infinty getting propigated around double weight = 1.0/Math.pow(distance, 2); weightSum += weight; result += value*weight; } else { result += value; weightSum += 1; } } return result/weightSum; } @Override public void train(RegressionDataSet dataSet, boolean parallel) { if(dataSet.getNumCategoricalVars() != 0) throw new FailedToFitException("KNN requires vector data only"); mode = Mode.REGRESSION; List<VecPaired<Vec, Double>> dataPoints = new ArrayList<VecPaired<Vec, Double>>(dataSet.size()); //Add all the data points for (int i = 0; i < dataSet.size(); i++) { DataPointPair<Double> dpp = dataSet.getDataPointPair(i); dataPoints.add(new VecPaired(dpp.getVector(), dpp.getPair())); } TrainableDistanceMetric.trainIfNeeded(distanceMetric, dataSet, parallel); vecCollection.build(parallel, dataPoints, distanceMetric); } @Override public NearestNeighbour clone() { NearestNeighbour clone = new NearestNeighbour(k, weighted, distanceMetric.clone(), vecCollection.clone()); if(this.predicting != null) clone.predicting = this.predicting.clone(); clone.mode = this.mode; if(this.vecCollection != null) clone.vecCollection = this.vecCollection.clone(); return clone; } /** * Guesses the distribution to use for the number of neighbors to consider * @param d the dataset to get the guess for * @return the guess for the Neighbors parameter */ public static Distribution guessNeighbors(DataSet d) { return new UniformDiscrete(1, 25); } @Override public boolean supportsWeightedData() { return false; } }
9,935
32.009967
135
java
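The NearestNeighbour class above supports both classification and regression; this sketch exercises the distance-weighted regression path through the (k, weighted) constructor shown in its source. The regression targets are hypothetical and the data-construction helpers are assumed from the wider library, as in the other sketches.

import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.classifiers.knn.NearestNeighbour;
import jsat.linear.DenseVector;
import jsat.regression.RegressionDataSet;

public class NearestNeighbourRegressionSketch
{
    public static void main(String[] args)
    {
        //Hypothetical 1-d regression data: y = 2x sampled on a grid
        RegressionDataSet train = new RegressionDataSet(1, new CategoricalData[0]);
        for (double x = 0; x <= 1.0; x += 0.05)
            train.addDataPoint(DenseVector.toDenseVec(x), 2 * x);

        //5 neighbors, weighted by inverse squared distance as in the regress() method above
        NearestNeighbour knn = new NearestNeighbour(5, true);
        knn.train(train, false);

        double yHat = knn.regress(new DataPoint(DenseVector.toDenseVec(0.42)));
        System.out.println("Prediction near the true value 0.84: " + yHat);
    }
}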
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/ALMA2.java
package jsat.classifiers.linear; import jsat.SingleWeightVectorModel; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.DenseVector; import jsat.linear.Vec; /** * Provides a linear implementation of the ALMAp algorithm for p = 2, which is * considerably more efficient to compute. It is a binary classifier for numeric * features. * <br> * ALMA requires one major parameter {@link #setAlpha(double) alpha} to be set, * the other two have default behavior / values that have provable convergence. * <br><br> * See: Gentile, C. (2002). <i>A New Approximate Maximal Margin Classification * Algorithm</i>. The Journal of Machine Learning Research, 2, 213–242. * Retrieved from <a href="http://dl.acm.org/citation.cfm?id=944811">here</a> * * @author Edward Raff */ public class ALMA2 extends BaseUpdateableClassifier implements BinaryScoreClassifier, SingleWeightVectorModel { private static final long serialVersionUID = -4347891273721908507L; private Vec w; private static final double p = 2; private double alpha; private double B; private double C = Math.sqrt(2); private int k; private boolean useBias = true; private double bias; /** * Creates a new ALMA learner using an alpha of 0.8 */ public ALMA2() { this(0.8); } /** * Creates a new ALMA learner using the given alpha * @param alpha the alpha value to use * @see #setAlpha(double) */ public ALMA2(double alpha) { setAlpha(alpha); } /** * Copy constructor * @param other the object to copy */ protected ALMA2(ALMA2 other) { if(other.w != null) this.w = other.w.clone(); this.alpha = other.alpha; this.B = other.B; this.C = other.C; this.k = other.k; } /** * Returns the weight vector used to compute results via a dot product. <br> * Do not modify this value, or you will alter the results returned. * @return the learned weight vector for prediction */ public Vec getWeightVec() { return w; } /** * Alpha controls the approximation of the large margin formed by ALMA, * with larger values causing more updates. A value of 1.0 will update only * on mistakes, while smaller values update if the error was not far enough * away from the margin. * <br><br> * NOTE: Whenever alpha is set, the value of {@link #setB(double) B} will * also be set to an appropriate value. This is not the only possible value * that will lead to convergence, and can be set manually after alpha is set * to another value. * * @param alpha the approximation scale in (0.0, 1.0] */ public void setAlpha(double alpha) { if(alpha <= 0 || alpha > 1 || Double.isNaN(alpha)) throw new ArithmeticException("alpha must be in (0, 1], not " + alpha); this.alpha = alpha; setB(1.0/alpha); } /** * Returns the approximation coefficient used * @return the approximation coefficient used */ public double getAlpha() { return alpha; } /** * Sets the B variable of the ALMA algorithm, this is set automatically by * {@link #setAlpha(double) }. * @param B the value for B */ public void setB(double B) { this.B = B; } /** * Returns the B value of the ALMA algorithm * @return the B value of the ALMA algorithm */ public double getB() { return B; } /** * Sets the C value of the ALMA algorithm. The default value is the one * suggested in the paper. 
* @param C the C value of ALMA */ public void setC(double C) { if(C <= 0 || Double.isInfinite(C) || Double.isNaN(C)) throw new ArithmeticException("C must be a posative cosntant"); this.C = C; } public double getC() { return C; } /** * Sets whether or not an implicit bias term will be added to the data set * @param useBias {@code true} to add an implicit bias term */ public void setUseBias(boolean useBias) { this.useBias = useBias; } /** * Returns whether or not an implicit bias term is in use * @return {@code true} if a bias term is in use */ public boolean isUseBias() { return useBias; } @Override public ALMA2 clone() { return new ALMA2(this); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(numericAttributes <= 0) throw new FailedToFitException("ALMA2 requires numeric features"); if(predicting.getNumOfCategories() != 2) throw new FailedToFitException("ALMA2 works only for binary classification"); w = new DenseVector(numericAttributes); k = 1; } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { final Vec x_t = dataPoint.getNumericalValues(); final double y_t = targetClass*2-1; double gamma = B * Math.sqrt(p-1) / k; double wx = w.dot(x_t)+bias; if(y_t*wx <= (1-alpha)*gamma)//update { double eta = C/Math.sqrt(p-1)/Math.sqrt(k++); w.mutableAdd(eta*y_t, x_t); if(useBias) bias += eta*y_t; final double norm = w.pNorm(2)+bias; if(norm > 1) w.mutableDivide(norm); } } @Override public CategoricalResults classify(DataPoint data) { if(w == null) throw new UntrainedModelException("The model has not yet been trained"); double wx = getScore(data); CategoricalResults cr =new CategoricalResults(2); if(wx < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { return w.dot(dp.getNumericalValues()); } @Override public boolean supportsWeightedData() { return false; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return bias; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } }
7,046
25.592453
113
java
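ALMA2 above is an online (updateable) binary classifier, so it can be driven point-by-point through the setUp/update methods shown in its source rather than with a full data set. The streamed feature vectors and labels below are hypothetical; DataPoint(Vec), CategoricalData(int), and DenseVector.toDenseVec are assumed from the wider library.

import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.classifiers.linear.ALMA2;
import jsat.linear.DenseVector;

public class ALMA2OnlineSketch
{
    public static void main(String[] args)
    {
        ALMA2 alma = new ALMA2(0.8); //alpha in (0, 1]; 1.0 would update only on mistakes

        //Online usage: declare the problem shape, then feed (point, weight, class label) one at a time
        alma.setUp(new CategoricalData[0], 2, new CategoricalData(2));
        double[][] xs = { {0.0, 0.1}, {0.1, 0.0}, {0.9, 1.0}, {1.0, 0.9} }; //hypothetical stream
        int[] ys = { 0, 0, 1, 1 }; //class 0/1, mapped internally to -1/+1
        for (int i = 0; i < xs.length; i++)
            alma.update(new DataPoint(DenseVector.toDenseVec(xs[i])), 1.0, ys[i]);

        DataPoint query = new DataPoint(DenseVector.toDenseVec(0.95, 0.95));
        System.out.println("score = " + alma.getScore(query)
                + ", P(class 1) = " + alma.classify(query).getProb(1));
    }
}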
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/AROW.java
package jsat.classifiers.linear; import java.util.List; import jsat.DataSet; import jsat.SingleWeightVectorModel; import jsat.classifiers.BaseUpdateableClassifier; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.DataPoint; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.linear.Matrix; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; /** * An implementation of Adaptive Regularization of Weight Vectors (AROW), which * uses second order information to learn a large margin binary classifier. As * such, updates can occur on correctly classified instances if they are not far * enough from the margin. Unlike many margin algorithms, it handles noise well. * <br> * NOTE: This implementation does not add an implicit bias term, so the solution * goes through the origin * <br><br> * See: Crammer, K., Kulesza, A.,&amp;Dredze, M. (2013). <i>Adaptive * regularization of weight vectors</i>. Machine Learning, 91(2), 155–187. * doi:10.1007/s10994-013-5327-x * * @author Edward Raff */ public class AROW extends BaseUpdateableClassifier implements BinaryScoreClassifier, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = 443803827811508204L; private Vec w; /** * Full covariance matrix */ private Matrix sigmaM; /** * Diagonal only covariance matrix */ private Vec sigmaV; private boolean diagonalOnly = false; private double r; /** * Temp vector used to store Sigma * x_t. Make sure the vector is zeroed out * before returning from update */ private Vec Sigma_xt; /** * Creates a new AROW learner */ public AROW() { this(1e-2, true); } /** * Creates a new AROW learner * @param r the regularization parameter * @param diagonalOnly whether or not to use only the diagonal of the covariance * @see #setR(double) * @see #setDiagonalOnly(boolean) */ public AROW(double r, boolean diagonalOnly) { setR(r); setDiagonalOnly(diagonalOnly); } /** * Copy constructor * @param other object to copy */ protected AROW(AROW other) { this.r = other.r; this.diagonalOnly = other.diagonalOnly; if(other.w != null) this.w = other.w.clone(); if(other.sigmaM != null) this.sigmaM = other.sigmaM.clone(); if(other.sigmaV != null) this.sigmaV = other.sigmaV.clone(); if(other.Sigma_xt != null) this.Sigma_xt = other.Sigma_xt.clone(); } /** * Using the full covariance matrix requires <i>O(d<sup>2</sup>)</i> work on * mistakes, where <i>d</i> is the dimension of the data. Runtime can be * reduced by using only the diagonal of the matrix to perform updates * in <i>O(s)</i> time, where <i>s &le; d</i> is the number of non-zero * values in the input * @param diagonalOnly {@code true} to use only the diagonal of the covariance */ public void setDiagonalOnly(boolean diagonalOnly) { this.diagonalOnly = diagonalOnly; } /** * Returns {@code true} if the covariance matrix is restricted to its diagonal entries * @return {@code true} if the covariance matrix is restricted to its diagonal entries */ public boolean isDiagonalOnly() { return diagonalOnly; } /** * Sets the r parameter of AROW, which controls the regularization. Larger * values reduce the change in the model on each update. 
* @param r the regularization parameter in (0, Inf) */ public void setR(double r) { if(Double.isNaN(r) || Double.isInfinite(r) || r <= 0) throw new IllegalArgumentException("r must be a postive constant, not " + r); this.r = r; } /** * Returns the regularization parameter * @return the regularization parameter */ public double getR() { return r; } /** * Returns the weight vector used to compute results via a dot product. <br> * Do not modify this value, or you will alter the results returned. * @return the learned weight vector for prediction */ public Vec getWeightVec() { return w; } @Override public AROW clone() { return new AROW(this); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(numericAttributes <= 0) throw new FailedToFitException("AROW requires numeric attributes to perform classification"); else if(predicting.getNumOfCategories() != 2) throw new FailedToFitException("AROW is a binary classifier"); w = new DenseVector(numericAttributes); Sigma_xt = new DenseVector(numericAttributes); if(diagonalOnly) { sigmaV = new DenseVector(numericAttributes); sigmaV.mutableAdd(1); } else sigmaM = Matrix.eye(numericAttributes); } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { final Vec x_t = dataPoint.getNumericalValues(); final double y_t = targetClass*2-1; double m_t = x_t.dot(w); if(y_t == Math.signum(m_t)) return;//no update needed double v_t = 0; if(diagonalOnly) { for(IndexValue iv : x_t) { double x_ti = iv.getValue(); v_t += x_ti * x_ti * sigmaV.get(iv.getIndex()); } } else { sigmaM.multiply(x_t, 1, Sigma_xt); v_t = x_t.dot(Sigma_xt); } double b_t_inv = v_t+r; double alpha_t = Math.max(0, 1-y_t*m_t)/b_t_inv; if(!diagonalOnly) w.mutableAdd(alpha_t * y_t, Sigma_xt); else for (IndexValue iv : x_t) w.increment(iv.getIndex(), alpha_t * y_t * iv.getValue() * sigmaV.get(iv.getIndex())); if(diagonalOnly) { /* diagonal is pairwise products as well: * S += S x x' S * S x == x' S b/c symmetry * S += Sx Sx * so just square the values and then add */ for(IndexValue iv : x_t) { int idx = iv.getIndex(); double xt_i = iv.getValue()*sigmaV.get(idx); sigmaV.increment(idx, -(xt_i*xt_i)/b_t_inv); } } else { //Because Sigma is symetric, x*S == S*x Matrix.OuterProductUpdate(sigmaM, Sigma_xt, Sigma_xt, -1/b_t_inv); } //Zero out temp store if(diagonalOnly) Sigma_xt.zeroOut(); } @Override public CategoricalResults classify(DataPoint data) { if(w == null) throw new UntrainedModelException("Model has not yet ben trained"); CategoricalResults cr = new CategoricalResults(2); double score = getScore(data); if(score < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { return w.dot(dp.getNumericalValues()); } @Override public boolean supportsWeightedData() { return false; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return 0; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } /** * Guess the distribution to use for the regularization term * {@link #setR(double) r} . 
* * @param d the data set to get the guess for * @return the guess for the r parameter */ public static Distribution guessR(DataSet d) { return new LogUniform(Math.pow(2, -4), Math.pow(2, 4));//from Exact Soft Confidence-Weighted Learning paper } }
8,912
27.567308
123
java
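A sketch of the AROW learner above with the (r, diagonalOnly) constructor it defines. The single-argument train(...) call is the epoch-based training assumed to be inherited from BaseUpdateableClassifier, and the toy data and construction helpers are hypothetical/assumed.

import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.linear.AROW;
import jsat.linear.DenseVector;

public class AROWSketch
{
    public static void main(String[] args)
    {
        //Hypothetical noisy binary data in 2 dimensions
        ClassificationDataSet train = new ClassificationDataSet(2, new CategoricalData[0], new CategoricalData(2));
        for (int i = 0; i < 50; i++)
        {
            train.addDataPoint(DenseVector.toDenseVec(Math.random(), Math.random() * 0.4), 0);
            train.addDataPoint(DenseVector.toDenseVec(Math.random(), 0.6 + Math.random() * 0.4), 1);
        }

        AROW arow = new AROW(0.1, true); //r = 0.1, diagonal-only covariance for cheaper updates
        arow.train(train); //epoch-based training assumed from BaseUpdateableClassifier

        DataPoint query = new DataPoint(DenseVector.toDenseVec(0.5, 0.9));
        System.out.println("margin score = " + arow.getScore(query));
    }
}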
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/BBR.java
package jsat.classifiers.linear; import java.util.Arrays; import java.util.concurrent.ExecutorService; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.exceptions.FailedToFitException; import jsat.linear.Vec; import static java.lang.Math.*; import java.util.List; import jsat.SingleWeightVectorModel; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.lossfunctions.LogisticLoss; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; /** * This is an implementation of Bayesian Binary Regression for L<sub>1</sub> and * L<sub>2</sub> regularized logistic regression. This model requires additional * memory to perform efficient column wise passes on the data set, assuming the * data is sparse. <br><br> * BBR uses a Trust Region Newton algorithm that allows convergence to occur in * a small number of iterations, but each iteration may be costly. * <br><br> * See: Genkin, A., Lewis, D. D.,&amp;Madigan, D. (2007). <i>Large-Scale Bayesian * Logistic Regression for Text Categorization</i>. Technometrics, 49(3), * 291–304. doi:10.1198/004017007000000245 * * @author Edward Raff */ public class BBR implements Classifier, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = 8297213093357011082L; //weight vector w is refferd to as beta in the original paper, just replace beta with w private Vec w; private int maxIterations; private double regularization; private boolean autoSetRegularization = true; private double bias; private boolean useBias = true; private double tolerance = 0.0005; private Prior prior; /** * Valid priors that control what type of regularization is applied */ public static enum Prior { /** * Laplace prior equivalent to L<sub>1</sub> regularization */ LAPLACE, /** * Gaussian prior equivalent to L<sub>2</sub> regularization */ GAUSSIAN } /** * Creates a new BBR for L<sub>1</sub> Logistic Regression object that will * use the given regularization value. * * @param regularization the regularization penalty to apply * @param maxIterations the maximum number of training iterations to perform */ public BBR(double regularization, int maxIterations) { this(regularization, maxIterations, Prior.LAPLACE); } /** * Creates a new BBR Logistic Regression object that will use the given * regularization value. * * @param regularization the regularization penalty to apply * @param maxIterations the maximum number of training iterations to perform * @param prior the prior to apply for regularization */ public BBR(double regularization, int maxIterations, Prior prior) { setMaxIterations(maxIterations); setRegularization(regularization); setAutoSetRegularization(false); setPrior(prior); } /** * Creates a new BBR for L<sub>1</sub> Logistic Regression object that will * attempt to automatically determine the regularization value to use. * * @param maxIterations the maximum number of training iterations to perform */ public BBR(int maxIterations) { this(1e-3, maxIterations, Prior.LAPLACE); } /** * Creates a new BBR Logistic Regression object that will attempt to * automatically determine the regularization value to use. 
* * @param maxIterations the maximum number of training iterations to perform * @param prior the prior to apply for regularization */ public BBR(int maxIterations, Prior prior) { setMaxIterations(maxIterations); setRegularization(0.01); setAutoSetRegularization(true); setPrior(prior); } /** * Copy constructor * * @param toCopy the object to copy */ protected BBR(BBR toCopy) { if (toCopy.w != null) this.w = toCopy.w.clone(); this.maxIterations = toCopy.maxIterations; this.regularization = toCopy.regularization; this.autoSetRegularization = toCopy.autoSetRegularization; this.bias = toCopy.bias; this.useBias = toCopy.useBias; this.tolerance = toCopy.tolerance; this.prior = toCopy.prior; } /** * Sets the regularization penalty to use if the algorithm has not been set * to choose one automatically. * * @param regularization sets the positive regularization penalty to use */ public void setRegularization(double regularization) { if (Double.isNaN(regularization) || Double.isNaN(regularization) || regularization <= 0) throw new IllegalArgumentException("Regularization must be positive, not " + regularization); this.regularization = regularization; } /** * Returns the regularization penalty used if the auto value is not used * * @return the regularization penalty used if the auto value is not used */ public double getRegularization() { return regularization; } /** * Sets whether or not the regularization term will be set automatically by * the algorithm, which is done as specified in the original paper. This may * choose a very large (and bad) value of the regularization term, and * should not be used with smaller data sets. This value is chosen * deterministically. * <br><br> * This value takes precedence over anything set with * {@link #setRegularization(double) } * * @param autoSetRegularization {@code true} to choose the regularization * term automatically, {@code false} to use whatever value was set before. */ public void setAutoSetRegularization(boolean autoSetRegularization) { this.autoSetRegularization = autoSetRegularization; } /** * Returns whether or not the algorithm will attempt to select the * regularization term automatically * * @return {@code true} if the regularization term is chosen automatically, * {@code false} otherwise. */ public boolean isAutoSetRegularization() { return autoSetRegularization; } /** * Sets the maximum number of iterations allowed before halting the * algorithm early. * * @param maxIterations the maximum number of training iterations */ public void setMaxIterations(int maxIterations) { this.maxIterations = maxIterations; } /** * Returns the maximum number of iterations allowed * * @return the maximum number of iterations allowed */ public int getMaxIterations() { return maxIterations; } /** * Sets the convergence tolerance target. Relative changes that are smaller * than the given tolerance will determine convergence. * <br><br> * The default value used is that suggested in the original paper of 0.0005 * * @param tolerance the positive convergence tolerance goal */ public void setTolerance(double tolerance) { if (Double.isNaN(tolerance) || Double.isInfinite(tolerance) || tolerance <= 0) throw new IllegalArgumentException("Tolerance must be positive, not " + tolerance); this.tolerance = tolerance; } /** * Returns the tolerance parameter that controls convergence * * @return the tolerance parameter that controls convergence */ public double getTolerance() { return tolerance; } /** * Sets whether or not an implicit bias term should be added to the model. 
* * @param useBias {@code true} to add a bias term, {@code false} to exclude * the bias term. */ public void setUseBias(boolean useBias) { this.useBias = useBias; } /** * Returns {@code true} if a bias term is in use, {@code false} otherwise. * * @return {@code true} if a bias term is in use, {@code false} otherwise. */ public boolean isUseBias() { return useBias; } /** * Sets the regularizing prior used * * @param prior the prior to use */ public void setPrior(Prior prior) { this.prior = prior; } /** * Returns the regularizing prior in use * * @return the regularizing prior in use */ public Prior getPrior() { return prior; } /** * Returns the weight vector used to compute results via a dot product. <br> * Do not modify this value, or you will alter the results returned. * @return the learned weight vector for prediction */ public Vec getWeightVec() { return w; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return bias; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public CategoricalResults classify(DataPoint data) { return LogisticLoss.classify(w.dot(data.getNumericalValues()) + bias); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { final int D = dataSet.getNumNumericalVars(); if (D <= 0) throw new FailedToFitException("Data set contains no numeric features"); final Vec[] columnMajor = dataSet.getNumericColumns(); w = new DenseVector(D); double[] delta = new double[useBias ? 
D + 1 : D]; Arrays.fill(delta, 1.0); final int N = dataSet.size(); double[] r = new double[N]; double[] y = new double[N]; for (int i = 0; i < N; i++) y[i] = dataSet.getDataPointCategory(i) * 2 - 1; final double lambda; if (autoSetRegularization) { //see equation (21) double normSqrdSum = 0; for (int i = 0; i < N; i++) normSqrdSum += pow(dataSet.getDataPoint(i).getNumericalValues().pNorm(2), 2); double sigma = D * N / normSqrdSum; //no regularization less than precision if (prior == Prior.LAPLACE) lambda = max(sqrt(2) / sigma, 1e-15); else lambda = max(sigma * sigma, 1e-15); } else lambda = regularization; double[] r_change = new double[N]; for (int iter = 0; iter < maxIterations; iter++) { for (int j = 0; j < D; j++) { double delta_vj = 0; final double w_jOrig = w.get(j); if (prior == Prior.LAPLACE) { //Algo 2 in the paper, computing delta_vj if (w_jOrig == 0) { //(try positive direction) delta_vj = tenativeUpdate(columnMajor, j, w_jOrig, y, r, lambda, 1.0, delta); if (delta_vj <= 0)//(positive direction failed) { //(try negative direction) delta_vj = tenativeUpdate(columnMajor, j, w_jOrig, y, r, lambda, -1.0, delta); if (delta_vj >= 0)//(negative direction failed) delta_vj = 0; } } else { final double sign = signum(w_jOrig); delta_vj = tenativeUpdate(columnMajor, j, w_jOrig, y, r, lambda, sign, delta); if (sign * (w_jOrig + delta_vj) < 0)//(cross over 0) delta_vj = -w_jOrig;//Done soe that w_j+-w_j = 0 } } else//Guassian prior { delta_vj = tenativeUpdate(columnMajor, j, w_jOrig, y, r, lambda, 0, delta); } double delta_wj = min(max(delta_vj, -delta[j]), delta[j]);//(limit step to trust region) for (IndexValue iv : columnMajor[j]) { final int i = iv.getIndex(); final double change = delta_wj * iv.getValue() * y[i]; r[i] += change; r_change[i] += change; } double newW_j = w_jOrig + delta_wj; //make tiny value zero if (abs(newW_j) < 1e-15)//Less than precions? its zero newW_j = 0; w.set(j, newW_j); delta[j] = max(2 * abs(delta_wj), delta[j] / 2); //(update size of trust region) } if (useBias)//update the bias term { double delta_vj; //Algo 2 in the paper, computing delta_vj if (bias == 0) { //(try positive direction) delta_vj = tenativeUpdate(null, D, bias, y, r, lambda, 1.0, delta); if (delta_vj <= 0)//(positive direction failed) { //(try negative direction) delta_vj = tenativeUpdate(null, D, bias, y, r, lambda, -1.0, delta); if (delta_vj >= 0)//(negative direction failed) delta_vj = 0; } } else { final double sign = signum(bias); delta_vj = tenativeUpdate(null, D, bias, y, r, lambda, sign, delta); if (sign * (bias + delta_vj) < 0)//(cross over 0) delta_vj = -bias;//Done soe that w_j+-w_j = 0 } double delta_wj = min(max(delta_vj, -delta[D]), delta[D]);//(limit step to trust region) for (int i = 0; i < N; i++) { final double change = delta_wj * y[i]; r[i] += change; r_change[i] += change; } double newW_j = bias + delta_wj; //make tiny value zero if (abs(newW_j) < 1e-15)//Less than precions? its zero newW_j = 0; bias = newW_j; delta[D] = max(2 * abs(delta_wj), delta[D] / 2); //(update size of trust region) } //Check for convergence double changeSum = 0, rSum = 0; for (int i = 0; i < N; i++) { changeSum += abs(r_change[i]); rSum += abs(r[i]); } if (changeSum / (1 + rSum) <= tolerance)//converged! 
break; Arrays.fill(r_change, 0.0);//resent changes for the next iteration } } private static double F(double r, double delta) { if (abs(r) <= delta) return 0.25; else return 1 / (2 + exp(abs(r) - delta) + exp(delta - abs(r))); } @Override public boolean supportsWeightedData() { return false; } @Override public BBR clone() { return new BBR(this); } /** * Gets the tentative update &delta;<sub>vj</sub> * * @param columnMajor the column major vector array. May be null if using * the implicit bias term * @param j the column to work on * @param w_j the value of the coefficient, used only under Gaussian prior * @param y the array of label values * @param r the array of r values * @param lambda the regularization value to apply * @param s the update direction (should be +1 or -1). Used only under * Laplace prior * @param delta the array of delta values * @return the tentative update value */ private double tenativeUpdate(final Vec[] columnMajor, final int j, final double w_j, final double[] y, final double[] r, final double lambda, final double s, final double[] delta) { double numer = 0, denom = 0; if (columnMajor != null) { Vec col_j = columnMajor[j]; if (col_j.nnz() == 0) return 0; for (IndexValue iv : col_j) { final double x_ij = iv.getValue(); final int i = iv.getIndex(); numer += x_ij * y[i] / (1 + exp(r[i])); denom += x_ij * x_ij * F(r[i], delta[j] * abs(x_ij)); if (prior == Prior.LAPLACE) numer -= lambda * s; else { numer -= w_j / lambda; denom += 1 / lambda; } } } else//bias term, all x_ij = 1 for (int i = 0; i < y.length; i++) { numer += y[i] / (1 + exp(r[i])) - lambda * s; denom += F(r[i], delta[j]); } return numer / denom; } }
17,716
31.330292
184
java
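The BBR source above exposes both an explicit-penalty constructor and an auto-regularized one, selected per prior; this sketch fits the Laplace (L1) and Gaussian (L2) variants on the same hypothetical data. Note the class javadoc's warning that automatic regularization selection is intended for larger data sets than this toy; the data-construction helpers are assumed from the wider library.

import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.linear.BBR;
import jsat.linear.DenseVector;

public class BBRSketch
{
    public static void main(String[] args)
    {
        //Hypothetical binary problem; BBR makes column-wise passes, so wide sparse data is also fine
        ClassificationDataSet train = new ClassificationDataSet(3, new CategoricalData[0], new CategoricalData(2));
        for (int i = 0; i < 40; i++)
        {
            train.addDataPoint(DenseVector.toDenseVec(1.0, Math.random(), 0.0), 0);
            train.addDataPoint(DenseVector.toDenseVec(0.0, Math.random(), 1.0), 1);
        }

        //L1-regularized logistic regression: explicit penalty of 0.01, at most 100 trust-region iterations
        BBR l1 = new BBR(0.01, 100, BBR.Prior.LAPLACE);
        l1.train(train);

        //L2 (Gaussian prior) variant that picks the penalty automatically from the data
        BBR l2 = new BBR(100, BBR.Prior.GAUSSIAN);
        l2.train(train);

        DataPoint query = new DataPoint(DenseVector.toDenseVec(0.0, 0.5, 1.0));
        System.out.println("L1 P(1) = " + l1.classify(query).getProb(1)
                + ", L2 P(1) = " + l2.classify(query).getProb(1));
    }
}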
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/LinearBatch.java
package jsat.classifiers.linear; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.*; import java.util.concurrent.atomic.DoubleAdder; import java.util.logging.Level; import java.util.logging.Logger; import jsat.DataSet; import jsat.SimpleWeightVectorModel; import jsat.classifiers.*; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.exceptions.FailedToFitException; import jsat.linear.ConcatenatedVec; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.linear.SubVector; import jsat.linear.Vec; import jsat.lossfunctions.LossC; import jsat.lossfunctions.LossFunc; import jsat.lossfunctions.LossMC; import jsat.lossfunctions.LossR; import jsat.lossfunctions.SoftmaxLoss; import jsat.math.Function; import jsat.math.FunctionVec; import jsat.math.optimization.*; import jsat.parameters.Parameterized; import jsat.regression.*; import jsat.utils.ListUtils; import jsat.utils.SystemInfo; import jsat.utils.concurrent.ParallelUtils; /** * LinearBatch learns either a classification or regression problem depending on * the {@link #setLoss(jsat.lossfunctions.LossFunc) loss function &#8467;(w,x)} * used. The solution attempts to minimize * <big>&sum;</big><sub>i</sub> &#8467;(w,x<sub>i</sub>) + * {@link #setLambda0(double) &lambda;<sub>0</sub>}/2 ||w||<sub>2</sub><sup>2</sup>, and is * trained using a batch optimization method. <br> * <br> * LinearBatch can be warm started from any model implementing the * {@link SimpleWeightVectorModel} interface. * <br> * <br> * Note: the current implementation does not currently use bias terms * @author Edward Raff */ public class LinearBatch implements Classifier, Regressor, Parameterized, SimpleWeightVectorModel, WarmClassifier, WarmRegressor { private static final long serialVersionUID = -446156124954287580L; /** * Weight vectors */ private Vec[] ws; /** * bias terms for each weight vector */ private double[] bs; private LossFunc loss; private double lambda0; private Optimizer optimizer; private double tolerance; private boolean useBiasTerm = true; /** * Creates a new Linear Batch learner for classification using a small * regularization term */ public LinearBatch() { this(new SoftmaxLoss(), 1e-6); } /** * Creates a new Linear Batch learner * @param loss the loss function to use * @param lambda0 the L<sub>2</sub> regularization term */ public LinearBatch(LossFunc loss, double lambda0) { this(loss, lambda0, 1e-3); } /** * Creates a new Linear Batch learner * @param loss the loss function to use * @param lambda0 the L<sub>2</sub> regularization term * @param tolerance the threshold for convergence */ public LinearBatch(LossFunc loss, double lambda0, double tolerance) { this(loss, lambda0, tolerance, null); } /** * Creates a new Linear Batch learner * @param loss the loss function to use * @param lambda0 the L<sub>2</sub> regularization term * @param tolerance the threshold for convergence * @param optimizer the batch optimization method to use */ public LinearBatch(LossFunc loss, double lambda0, double tolerance, Optimizer optimizer) { setLoss(loss); setLambda0(lambda0); setOptimizer(optimizer); setTolerance(tolerance); } /** * Copy constructor * @param toCopy the object to copy */ public LinearBatch(LinearBatch toCopy) { this(toCopy.loss.clone(), toCopy.lambda0, toCopy.tolerance, toCopy.optimizer == null ? 
null : toCopy.optimizer.clone()); if(toCopy.ws != null) { this.ws = new Vec[toCopy.ws.length]; for(int i = 0; i < toCopy.ws.length; i++) this.ws[i] = toCopy.ws[i].clone(); } if(toCopy.bs != null) this.bs = Arrays.copyOf(toCopy.bs, toCopy.bs.length); } public void setUseBiasTerm(boolean useBiasTerm) { this.useBiasTerm = useBiasTerm; } public boolean isUseBiasTerm() { return useBiasTerm; } /** * &lambda;<sub>0</sub> controls the L<sub>2</sub> regularization penalty. * @param lambda0 the L<sub>2</sub> regularization penalty to use */ public void setLambda0(double lambda0) { if(lambda0 < 0 || Double.isNaN(lambda0) || Double.isInfinite(lambda0)) throw new IllegalArgumentException("Lambda0 must be non-negative, not " + lambda0); this.lambda0 = lambda0; } /** * Returns the L<sub>2</sub> regularization term in use * @return the L<sub>2</sub> regularization term in use */ public double getLambda0() { return lambda0; } /** * Sets the loss function used for the model. The loss function controls * whether or not regression, binary classification, or multi-class * classification is supported. * @param loss the loss function to use */ public void setLoss(LossFunc loss) { this.loss = loss; } /** * Returns the loss function in use * @return the loss function in use */ public LossFunc getLoss() { return loss; } /** * Sets the method of batch optimization that will be used. {@code null} is * valid for this value, in which case the implementation will attempt to * select a reasonable optimizer automatically. <br> * <br> * NOTE: the current implementation requires the optimizer to work based off * only the function value and its derivative. * * @param optimizer the method to use for function minimization */ public void setOptimizer(Optimizer optimizer) { this.optimizer = optimizer; } /** * Returns the optimization method in use, or {@code null}. * @return the optimization method in use, or {@code null}. */ public Optimizer getOptimizer() { return optimizer; } /** * Sets the convergence tolerance to user for training. Smaller values reach * a more accuracy solution but may take longer to complete.<br> * While zero is a valid tolerance value, it is not usually useful in * practice. Values in [10<sup>-4</sup>, 10<sup>-2</sup>] are usually more * practical. 
* * @param tolerance the convergence tolerance */ public void setTolerance(double tolerance) { if(tolerance < 0 || Double.isNaN(tolerance) || Double.isInfinite(tolerance)) throw new IllegalArgumentException("Tolerance must be a non-negative constant, not " + tolerance); this.tolerance = tolerance; } /** * Returns the value of the convergence tolerance parameter * @return the convergence tolerance parameter */ public double getTolerance() { return tolerance; } @Override public CategoricalResults classify(DataPoint data) { final Vec x = data.getNumericalValues(); if(ws.length == 1) return ((LossC)loss).getClassification(ws[0].dot(x)+bs[0]); else { Vec pred = new DenseVector(ws.length); for(int i = 0; i < ws.length; i++) pred.set(i, ws[i].dot(x)+bs[i]); ((LossMC)loss).process(pred, pred); return ((LossMC)loss).getClassification(pred); } } @Override public double regress(DataPoint data) { final Vec x = data.getNumericalValues(); return ((LossR)loss).getRegression(ws[0].dot(x)+bs[0]); } @Override public void train(final ClassificationDataSet D, final boolean parallel) { train(D, null, parallel); } @Override public void train(ClassificationDataSet D, Classifier warmSolution, boolean parallel) { if(D.getNumNumericalVars() <= 0) throw new FailedToFitException("LinearBath requires numeric features to work"); if(!(loss instanceof LossC)) throw new FailedToFitException("Loss function " + loss.getClass().getSimpleName() + " does not support classification"); if(D.getClassSize() > 2) if (!(loss instanceof LossMC)) throw new FailedToFitException("Loss function " + loss.getClass().getSimpleName() + " does not support multi-class classification"); else { ws = new Vec[D.getClassSize()]; bs = new double[ws.length]; } else { ws = new Vec[1]; bs = new double[1]; } for (int i = 0; i < ws.length; i++) ws[i] = new DenseVector(D.getNumNumericalVars()); Optimizer optimizerToUse; if(optimizer == null) optimizerToUse = new LBFGS(10); else optimizerToUse = optimizer.clone(); doWarmStartIfNotNull(warmSolution); ExecutorService threadPool = ParallelUtils.getNewExecutor(parallel); if(ws.length == 1) { if(useBiasTerm) { //Special wrapper class that will handle it - tight coupling with the implementation of LossFun and GradFunc Vec w_tmp = new VecWithBias(ws[0], bs); optimizerToUse.optimize(tolerance, w_tmp, w_tmp, new LossFunction(D, loss), new GradFunction(D, loss), parallel); } else optimizerToUse.optimize(tolerance, ws[0], ws[0], new LossFunction(D, loss), new GradFunction(D, loss), parallel); } else { LossMC lossMC = (LossMC) loss; ConcatenatedVec wAll; if(useBiasTerm)//append bias terms and logic in the Loss and Grad functions wil handle it { ArrayList<Vec> vecs = new ArrayList<>(Arrays.asList(ws)); vecs.add(DenseVector.toDenseVec(bs)); wAll = new ConcatenatedVec(vecs); } else wAll = new ConcatenatedVec(Arrays.asList(ws)); optimizerToUse.optimize(tolerance, wAll, new DenseVector(wAll), new LossMCFunction(D, lossMC), new GradMCFunction(D, lossMC), parallel); } threadPool.shutdownNow(); } /** * Performs a warm start if the given object is of the appropriate class. * Nothing happens if input it null. 
* * @param warmSolution * @throws FailedToFitException */ private void doWarmStartIfNotNull(Object warmSolution) throws FailedToFitException { if(warmSolution != null ) { if(warmSolution instanceof SimpleWeightVectorModel) { SimpleWeightVectorModel warm = (SimpleWeightVectorModel) warmSolution; if(warm.numWeightsVecs() != ws.length) throw new FailedToFitException("Warm solution has " + warm.numWeightsVecs() + " weight vectors instead of " + ws.length); for(int i = 0; i < ws.length; i++) { warm.getRawWeight(i).copyTo(ws[i]); if(useBiasTerm) bs[i] = warm.getBias(i); } } else throw new FailedToFitException("Can not warm warm from " + warmSolution.getClass().getCanonicalName()); } } @Override public void train(RegressionDataSet D, boolean parallel) { train(D, this, parallel); } @Override public void train(RegressionDataSet dataSet, Regressor warmSolution) { train(dataSet, warmSolution, false); } @Override public void train(RegressionDataSet D, Regressor warmSolution, boolean parallel) { if(D.getNumNumericalVars() <= 0) throw new FailedToFitException("LinearBath requires numeric features to work"); if(!(loss instanceof LossR)) throw new FailedToFitException("Loss function " + loss.getClass().getSimpleName() + " does not regression"); ws = new Vec[]{ new DenseVector(D.getNumNumericalVars()) }; bs = new double[1]; Optimizer optimizerToUse; if(optimizer == null) optimizerToUse = new LBFGS(10); else optimizerToUse = optimizer.clone(); doWarmStartIfNotNull(warmSolution); ExecutorService threadPool = ParallelUtils.getNewExecutor(parallel); if(useBiasTerm) { Vec w_tmp = new VecWithBias(ws[0], bs); optimizerToUse.optimize(tolerance, w_tmp, w_tmp, new LossFunction(D, loss), new GradFunction(D, loss), parallel); } else optimizerToUse.optimize(tolerance, ws[0], ws[0], new LossFunction(D, loss), new GradFunction(D, loss), parallel); threadPool.shutdownNow(); } private static double getTargetY(DataSet D, int i) { double y; if (D instanceof ClassificationDataSet) y = ((ClassificationDataSet) D).getDataPointCategory(i) * 2 - 1; else y = ((RegressionDataSet) D).getTargetValue(i); return y; } @Override public boolean warmFromSameDataOnly() { return false; } @Override public Vec getRawWeight(int index) { return ws[index]; } @Override public double getBias(int index) { return bs[index]; } @Override public int numWeightsVecs() { return ws.length; } private class VecWithBias extends Vec { public Vec w; public double[] b; public VecWithBias(Vec w, double[] b) { this.w = w; this.b = b; } //2 hacks below to make the original code work with bias terms "transparently" This means we need to know which functions will be called with a miss-matched size @Override public double dot(Vec v) { if(v.length() == w.length()) return w.dot(v)+b[0]; return super.dot(v); } @Override public void mutableAdd(double c, Vec b) { if(b.length() == w.length()) { w.mutableAdd(c, b); this.b[0] += c; } else super.mutableAdd(c, b); } @Override public int length() { return w.length()+1; } @Override public double get(int index) { if(index < w.length()) return w.get(index); else if (index == w.length()) return b[0]; else throw new IndexOutOfBoundsException(); } @Override public void set(int index, double val) { if(index < w.length()) w.set(index, val); else if (index == w.length()) b[0] = val; else throw new IndexOutOfBoundsException(); } @Override public boolean isSparse() { return w.isSparse(); } @Override public Vec clone() { return new VecWithBias(w.clone(), Arrays.copyOf(b, b.length)); } @Override public void setLength(int length) { throw new 
UnsupportedOperationException("Not supported yet."); } } /** * Function for using the single weight vector loss functions related to * {@link LossC} and {@link LossR}. */ public class LossFunction implements Function { private static final long serialVersionUID = -576682206943283356L; private final DataSet D; private final LossFunc loss; public LossFunction(DataSet D, LossFunc loss) { this.D = D; this.loss = loss; } @Override public double f(Vec w, boolean parallel) { DoubleAdder sum = new DoubleAdder(); DoubleAdder weightSum = new DoubleAdder(); ParallelUtils.run(parallel, D.size(), (start, end)-> { for(int i = start; i < end; i++) { DataPoint dp = D.getDataPoint(i); Vec x = dp.getNumericalValues(); double y = getTargetY(D, i); sum.add(loss.getLoss(w.dot(x), y)*D.getWeight(i)); weightSum.add(D.getWeight(i)); } }); if(lambda0 > 0) return sum.sum()/weightSum.sum() + lambda0*w.dot(w); else return sum.sum()/weightSum.sum(); } } /** * Function for using the single weight vector loss functions related to * {@link LossC} and {@link LossR} */ public class GradFunction implements FunctionVec { private final DataSet D; private final LossFunc loss; private ThreadLocal<Vec> tempVecs; public GradFunction(DataSet D, LossFunc loss) { this.D = D; this.loss = loss; } @Override public Vec f(Vec w, Vec s, boolean parallel) { if(s == null) s = w.clone(); s.zeroOut(); DoubleAdder weightSum = new DoubleAdder(); ThreadLocal<Vec> tl_s = ThreadLocal.withInitial(s::clone); ParallelUtils.run(parallel, D.size(), (start, end)-> { Vec s_l = tl_s.get(); for (int i = start; i < end; i++) { DataPoint dp = D.getDataPoint(i); Vec x = dp.getNumericalValues(); double y = getTargetY(D, i); s_l.mutableAdd(loss.getDeriv(w.dot(x), y)*D.getWeight(i), x); weightSum.add(D.getWeight(i)); } return s_l; }, (a,b)->a.add(b)) .copyTo(s); s.mutableDivide(weightSum.sum()); if(lambda0 > 0) s.mutableSubtract(lambda0, w); return s; } } public class LossMCFunction implements Function { private static final long serialVersionUID = -861700500356609563L; private final ClassificationDataSet D; private final LossMC loss; public LossMCFunction(ClassificationDataSet D, LossMC loss) { this.D = D; this.loss = loss; } @Override public double f(Vec w, boolean parallel) { DoubleAdder sum = new DoubleAdder(); Vec pred = new DenseVector(D.getClassSize());//store the predictions in //bias terms are at the end, treat them seperate and special final int subWSize = (w.length() - (useBiasTerm ? 
bs.length : 0) )/D.getClassSize(); DoubleAdder weightSum = new DoubleAdder(); ParallelUtils.run(parallel, D.size(), (start, end)-> { Vec pred_local = pred.clone(); for (int i = start; i < end; i++) { DataPoint dp = D.getDataPoint(i); Vec x = dp.getNumericalValues(); for(int k = 0; k < pred_local.length(); k++) pred_local.set(k, new SubVector(k*subWSize, subWSize, w).dot(x)); if(useBiasTerm) pred_local.mutableAdd(new SubVector(w.length()-bs.length, bs.length, w)); loss.process(pred_local, pred_local); int y = D.getDataPointCategory(i); sum.add(loss.getLoss(pred_local, y)*D.getWeight(i)); weightSum.add(D.getWeight(i)); } }); if(lambda0 > 0 ) return sum.sum()/weightSum.sum() + lambda0*w.dot(w); return sum.sum(); } } private class GradMCFunction implements FunctionVec { private final ClassificationDataSet D; private final LossMC loss; private ThreadLocal<Vec> tempVecs; public GradMCFunction(ClassificationDataSet D, LossMC loss) { this.D = D; this.loss = loss; } @Override public Vec f(Vec w, Vec s, boolean parllel) { if(s == null) s = w.clone(); s.zeroOut(); ThreadLocal<Vec> tl_s = ThreadLocal.withInitial(s::clone); Vec pred = new DenseVector(D.getClassSize());//store the predictions in final int subWSize = (w.length() - (useBiasTerm ? bs.length : 0) )/D.getClassSize(); DoubleAdder weightSum = new DoubleAdder(); ParallelUtils.run(parllel, D.size(), (start, end)-> { Vec s_l = tl_s.get(); Vec pred_local = pred.clone(); for (int i = start; i < end; i++) { DataPoint dp = D.getDataPoint(i); Vec x = dp.getNumericalValues(); for(int k = 0; k < pred_local.length(); k++) pred_local.set(k, new SubVector(k*subWSize, subWSize, w).dot(x)); if(useBiasTerm) pred_local.mutableAdd(new SubVector(w.length()-bs.length, bs.length, w)); loss.process(pred_local, pred_local); int y = D.getDataPointCategory(i); loss.deriv(pred_local, pred_local, y); for(int k = 0; k < pred_local.length(); k++) new SubVector(k*subWSize, subWSize, s_l).mutableAdd(pred_local.get(k)*D.getWeight(i), x); weightSum.add(D.getWeight(i)); } return s_l; }, (a,b)->a.add(b)).copyTo(s); s.mutableDivide(weightSum.sum()); if(lambda0 > 0) s.mutableSubtract(lambda0, w); return s; } } @Override public boolean supportsWeightedData() { return true; } @Override public LinearBatch clone() { return new LinearBatch(this); } /** * Guess the distribution to use for the regularization term * {@link #setLambda0(double) &lambda;<sub>0</sub>} . * * @param d the data set to get the guess for * @return the guess for the &lambda;<sub>0</sub> parameter */ public static Distribution guessLambda0(DataSet d) { return new LogUniform(1e-7, 1e-2); } }
23,065
31.170153
169
java
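A minimal usage sketch (not part of the JSAT source) for the LinearBatch class in the record above. It exercises the warm-start path train(RegressionDataSet, Regressor) and the plain train(RegressionDataSet, boolean) shown in the code; the no-argument LinearBatch constructor, the RegressionDataSet(int, CategoricalData[]) constructor and addDataPoint(Vec, double) are assumed from JSAT's usual API and should be checked against the library version.

import jsat.classifiers.CategoricalData;
import jsat.classifiers.linear.LinearBatch;
import jsat.linear.DenseVector;
import jsat.regression.RegressionDataSet;

// Hypothetical demo class, not from the JSAT repository
public class LinearBatchWarmStartDemo
{
    public static void main(String[] args)
    {
        // Tiny synthetic regression set: y = 2*x0 - x1 (constructor and addDataPoint assumed)
        RegressionDataSet data = new RegressionDataSet(2, new CategoricalData[0]);
        for (int i = 0; i < 100; i++)
        {
            double x0 = i / 100.0, x1 = (i % 10) / 10.0;
            data.addDataPoint(new DenseVector(new double[]{x0, x1}), 2 * x0 - x1);
        }

        LinearBatch first = new LinearBatch();   // no-argument constructor assumed
        first.train(data, false);                // batch training, single threaded

        // Warm-start a second model from the first one's solution, which routes
        // through doWarmStartIfNotNull since LinearBatch is a SimpleWeightVectorModel
        LinearBatch second = new LinearBatch();
        second.train(data, first);

        System.out.println("w = " + second.getRawWeight(0) + ", bias = " + second.getBias(0));
    }
}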
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/LinearL1SCD.java
package jsat.classifiers.linear; import java.util.Arrays; import java.util.Random; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.DataPoint; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.linear.SparseVector; import jsat.linear.Vec; import jsat.regression.RegressionDataSet; import jsat.utils.random.RandomUtil; /** * Implements an iterative and single threaded form of fast * Stochastic Coordinate Decent for optimizing L<sub>1</sub> regularized * linear regression problems. It performs very well when the number of data * points is very large, especially when the feature count is smaller in * comparison. It also works well on sparse data sets. <br> * Using the squared loss is equivalent to LASSO regression, and the LOG loss * is equivalent to logistic regression. <br> * <br> * Note: This implementation requires all feature values to be in the range * [-1, 1]. By default scaling is performed to [0,1]. If your data is dense or * has negative and positive feature values, scaling to [-1, 1] may perform * better. * See {@link #setReScale(boolean) }<br> * <br> * See:<br> * <a href="http://eprints.pascal-network.org/archive/00005418/">Shalev-Shwartz, * S.,&amp;Tewari, A. (2009). <i>Stochastic Methods for L<sub>1</sub>-regularized * Loss Minimization</i>. 26th International Conference on Machine Learning * (Vol. 12, pp. 929–936).</a> * * @author Edward Raff */ public class LinearL1SCD extends StochasticSTLinearL1 { private static final long serialVersionUID = 3135562347568407186L; /** * Creates a new SCD L<sub>1</sub> learner using default settings. */ public LinearL1SCD() { this(DEFAULT_EPOCHS, DEFAULT_REG, DEFAULT_LOSS); } /** * Creates a new SCD L<sub>1</sub> learner. * @param epochs the number of learning iterations * @param lambda the regularization penalty * @param loss the loss function to use */ public LinearL1SCD(int epochs, double lambda, Loss loss) { this(epochs, lambda, loss, true); } /** * Creates a new SCD L<sub>1</sub> learner. 
* @param epochs the number of learning iterations * @param lambda the regularization penalty * @param loss the loss function to use * @param reScale whether or not to rescale the feature values */ public LinearL1SCD(int epochs, double lambda, Loss loss, boolean reScale) { setEpochs(epochs); setLambda(lambda); setLoss(loss); setReScale(reScale); } @Override public CategoricalResults classify(DataPoint data) { if(w == null) throw new UntrainedModelException("Model has not been trained"); Vec x = data.getNumericalValues(); return loss.classify(wDot(x)); } @Override public double regress(DataPoint data) { if(w == null) throw new UntrainedModelException("Model has not been trained"); Vec x = data.getNumericalValues(); return loss.regress(wDot(x)); } /** * Performs rescaling as requested or throws an exception if a violation was * encountered * @param featureVals the array of feature values * @param m the number of data points * @throws FailedToFitException */ private void featureScaleCheck(Vec[] featureVals, int m) throws FailedToFitException { if(reScale) for(int j = 0; j < featureVals.length; j++) { if(obvMin[j] == 0 && minScaled == 0)//We can skip 1st and last step { featureVals[j].mutableMultiply(maxScaled/obvMax[j]); } else//do all steps { featureVals[j].mutableSubtract(obvMin[j]); featureVals[j].mutableMultiply((maxScaled-minScaled)/(obvMax[j]-obvMin[j])); featureVals[j].mutableAdd(minScaled); } //If we are not sparse enough after re-scaling, transform back if(featureVals[j].isSparse() && featureVals[j].nnz() > m*0.75) featureVals[j] = new DenseVector(featureVals[j]); } else //Check for violations for(int j = 0; j < obvMin.length; j++) if(obvMax[j] > 1 || obvMin[j] < -1) throw new FailedToFitException("All feature values must be in the range [-1,1]"); } private void setUpFeatureVals(Vec[] featureVals, boolean sparse, int m, DataSet dataSet) { //All feature values need to be scaled into -1, 1 obvMin = new double[featureVals.length]; Arrays.fill(obvMin, Double.POSITIVE_INFINITY); obvMax = new double[featureVals.length]; Arrays.fill(obvMax, Double.NEGATIVE_INFINITY); for(int i = 0; i < featureVals.length; i++) featureVals[i] = sparse ? new SparseVector(m) : new DenseVector(m); if(sparse) Arrays.fill(obvMin, 0.0); for(int i = 0; i < dataSet.size(); i++) { Vec x = dataSet.getDataPoint(i).getNumericalValues(); for(IndexValue iv : x) { int j = iv.getIndex(); double v = iv.getValue(); featureVals[j].set(i, v); obvMax[j] = Math.max(obvMax[j], v); obvMin[j] = Math.min(obvMin[j], v); } } } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { boolean sparse = dataSet.getDataPoint(0).getNumericalValues().isSparse(); int m = dataSet.size(); Vec[] featureVals = new Vec[dataSet.getNumNumericalVars()]; for(int i = 0; i < featureVals.length; i++) featureVals[i] = sparse ? 
new SparseVector(m) : new DenseVector(m); setUpFeatureVals(featureVals, sparse, m, dataSet); featureScaleCheck(featureVals, m); double[] target = new double[m]; for(int i = 0; i < dataSet.size(); i++) target[i] = dataSet.getTargetValue(i); train(featureVals, target); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { if(dataSet.getClassSize() != 2) throw new FailedToFitException("Only binary classification problems are supported"); boolean sparse = dataSet.getDataPoint(0).getNumericalValues().isSparse(); int m = dataSet.size(); Vec[] featureVals = new Vec[dataSet.getNumNumericalVars()]; setUpFeatureVals(featureVals, sparse, m, dataSet); featureScaleCheck(featureVals, m); double[] target = new double[m]; for(int i = 0; i < dataSet.size(); i++) target[i] = dataSet.getDataPointCategory(i)*2-1; train(featureVals, target); } /** * * @param featureVals a vector for each feature, where each vector contains all values for the feature in dataset order * @param target target values */ private void train(Vec[] featureVals, double[] target) { final int d = featureVals.length; final int m = target.length; w = new DenseVector(d); final double[] z = new double[m]; final double beta = loss.beta(); Random rand = RandomUtil.getRandom(); for (int t = 1; t <= epochs; t++) { int j = rand.nextInt(d + 1);//+1 for the bias term double g = 0.0; if (j < d) { Vec xj = featureVals[j]; for (IndexValue iv : xj) { int i = iv.getIndex(); g += loss.deriv(z[i], target[i]) * iv.getValue(); } } else//Bias term update, all x[i]_j = 1 { for (int i = 0; i < target.length; i++) g += loss.deriv(z[i], target[i]); } g /= m; double eta; double w_j = j == d ? bias : w.get(j); if (w_j - g / beta > lambda / beta) eta = -g / beta - lambda / beta; else if (w_j - g / beta < -lambda / beta) eta = -g / beta + lambda / beta; else eta = -w_j; if (j < d) w.increment(j, eta); else bias += eta; if (j < d) for (IndexValue iv : featureVals[j]) z[iv.getIndex()] += eta * iv.getValue(); else//Bias update, all x[i]_j = 1 for (int i = 0; i < target.length; i++) z[i] += eta; } } @Override public boolean supportsWeightedData() { return false; } @Override public LinearL1SCD clone() { LinearL1SCD clone = new LinearL1SCD(epochs, lambda, loss, reScale); if(this.w != null) clone.w = this.w.clone(); clone.bias = this.bias; clone.minScaled = this.minScaled; clone.maxScaled = this.maxScaled; if(this.obvMin != null) clone.obvMin = Arrays.copyOf(this.obvMin, this.obvMin.length); if(this.obvMax != null) clone.obvMax = Arrays.copyOf(this.obvMax, this.obvMax.length); return clone; } }
9,899
32.559322
123
java
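A minimal usage sketch (not part of the JSAT source) for the LinearL1SCD class in the record above. The constructor, train(ClassificationDataSet) and classify(DataPoint) calls appear in the class itself; the ClassificationDataSet and DataPoint construction calls are assumptions about JSAT's usual API. Note that the learner is binary only and expects feature values in [-1, 1], with rescaling to [0, 1] enabled by default.

import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.linear.LinearL1SCD;
import jsat.linear.DenseVector;

// Hypothetical demo class, not from the JSAT repository
public class LinearL1SCDDemo
{
    public static void main(String[] args)
    {
        // Binary problem with 2 numeric features in [0, 1] (dataset API assumed)
        ClassificationDataSet data = new ClassificationDataSet(2, new CategoricalData[0], new CategoricalData(2));
        for (int i = 0; i < 200; i++)
        {
            double x0 = (i % 100) / 100.0, x1 = (i % 7) / 7.0;
            data.addDataPoint(new DenseVector(new double[]{x0, x1}), x0 > x1 ? 1 : 0);
        }

        LinearL1SCD scd = new LinearL1SCD();   // default epochs, lambda, and loss
        scd.train(data);                       // stochastic coordinate descent over the features

        DataPoint query = new DataPoint(new DenseVector(new double[]{0.9, 0.1}));
        System.out.println("P(class 1) = " + scd.classify(query).getProb(1));
    }
}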
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/LinearSGD.java
package jsat.classifiers.linear; import java.util.Arrays; import java.util.List; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.SimpleWeightVectorModel; import jsat.classifiers.BaseUpdateableClassifier; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.DataPoint; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.linear.ScaledVector; import jsat.linear.Vec; import jsat.lossfunctions.HingeLoss; import jsat.lossfunctions.LossC; import jsat.lossfunctions.LossFunc; import jsat.lossfunctions.LossMC; import jsat.lossfunctions.LossR; import jsat.math.decayrates.DecayRate; import jsat.math.decayrates.PowerDecay; import jsat.math.optimization.stochastic.GradientUpdater; import jsat.math.optimization.stochastic.SimpleSGD; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.regression.BaseUpdateableRegressor; import jsat.regression.RegressionDataSet; import jsat.regression.UpdateableRegressor; /** * LinearSGD learns either a classification or regression problem depending on * the {@link #setLoss(jsat.lossfunctions.LossFunc) loss function &#x2113;(w,x)} * used. The solution attempts to minimize * <big>&sum;</big><sub>i</sub> &#x2113;(w,x<sub>i</sub>) + * {@link #setLambda0(double) &lambda;<sub>0</sub>}/2 ||w||<sub>2</sub><sup>2</sup> + * {@link #setLambda1(double) &lambda;<sub>1</sub>} ||w||<sub>1</sub>, and is * trained by Stochastic Gradient Descent. <br> * <br> * <br> * NOTE: To support L<sub>1</sub> regularization with sparse results and online * learning at the same time, the normalization of the regularization penalty by * the number of data points is not present in the implementation at this time. * Setting {@link #setLambda1(double) &lambda;<sub>1</sub>} to the desired value * divided by the number of unique data points in the whole set will result in * the correct regularization penalty being applied. * * See: * <ul> * <li>Tsuruoka, Y., Tsujii, J.,&amp;Ananiadou, S. (2009). <i>Stochastic gradient * descent training for L1-regularized log-linear models with cumulative * penalty</i>. Proceedings of the Joint Conference of the 47th Annual Meeting * of the ACL and the 4th International Joint Conference on Natural Language * Processing of the AFNLP, 1, 477. doi:10.3115/1687878.1687946</li> * </ul> * @author Edward Raff */ public class LinearSGD extends BaseUpdateableClassifier implements UpdateableRegressor, Parameterized, SimpleWeightVectorModel { private static final long serialVersionUID = -59695592724956535L; private LossFunc loss; private GradientUpdater gradientUpdater; private double eta; private DecayRate decay; private Vec[] ws; private GradientUpdater[] gus; private double[] bs; private int time; private double lambda0; private double lambda1; private double l1U; private double[][] l1Q; private boolean useBias = true; /** * Creates a new LinearSGD learner for multi-class classification problems. */ public LinearSGD() { this(new HingeLoss(), 1e-4, 0.0); } /** * Creates a new LinearSGD learner * @param loss the loss function to use * @param lambda0 the L<sub>2</sub> regularization term * @param lambda1 the L<sub>1</sub> regularization term */ public LinearSGD(LossFunc loss, double lambda0, double lambda1) { this(loss, 0.001, new PowerDecay(1, 0.1), lambda0, lambda1); } /** * Creates a new LinearSGD learner. 
* @param loss the loss function to use * @param eta the initial learning rate * @param decay the decay rate for &eta; * @param lambda0 the L<sub>2</sub> regularization term * @param lambda1 the L<sub>1</sub> regularization term */ public LinearSGD(LossFunc loss, double eta, DecayRate decay, double lambda0, double lambda1) { setLoss(loss); setEta(eta); setEtaDecay(decay); setGradientUpdater(new SimpleSGD()); setLambda0(lambda0); setLambda1(lambda1); } /** * Copy constructor * @param toClone the object to copy */ public LinearSGD(LinearSGD toClone) { this.loss = toClone.loss.clone(); this.eta = toClone.eta; this.decay = toClone.decay.clone(); this.time = toClone.time; this.lambda0 = toClone.lambda0; this.lambda1 = toClone.lambda1; this.l1U = toClone.l1U; this.useBias = toClone.useBias; this.gradientUpdater = toClone.gradientUpdater; if(toClone.l1Q != null) { this.l1Q = new double[toClone.l1Q.length][]; for(int i = 0; i < toClone.l1Q.length; i++) this.l1Q[i] = Arrays.copyOf(toClone.l1Q[i], toClone.l1Q[i].length); } if(toClone.ws != null) { this.ws = new Vec[toClone.ws.length]; this.bs = new double[toClone.bs.length]; this.gus = new GradientUpdater[toClone.gus.length]; for(int i = 0; i < ws.length; i++) { this.ws[i] = toClone.ws[i].clone(); this.bs[i] = toClone.bs[i]; this.gus[i] = toClone.gus[i].clone(); } } } /** * Sets the method that will be used to update the weight vectors given * their gradient information. * @param gradientUpdater the method to use for updating the weight vectors * from the gradient */ public void setGradientUpdater(GradientUpdater gradientUpdater) { if(gradientUpdater == null ) throw new IllegalArgumentException("Gradient updater must be non-null"); this.gradientUpdater = gradientUpdater; } /** * * @return the method to use for updating the weight vectors from the * gradient */ public GradientUpdater getGradientUpdater() { return gradientUpdater; } /** * Sets the rate at which {@link #setEta(double) &eta;} is decayed at each * update. * @param decay the decay rate to use */ public void setEtaDecay(DecayRate decay) { this.decay = decay; } /** * Returns the decay rate in use * @return the decay rate in use */ public DecayRate getEtaDecay() { return decay; } /** * Sets the initial learning rate &eta; to use. It should generally be in * (0, 1), but any positive value is acceptable. * @param eta the learning rate to use. */ public void setEta(double eta) { if(eta <= 0 || Double.isNaN(eta) || Double.isInfinite(eta)) throw new IllegalArgumentException("eta must be a positive constant, not " + eta); this.eta = eta; } /** * Returns the current learning rate in use * @return the current learning rate in use */ public double getEta() { return eta; } /** * Sets the loss function used for the model. The loss function controls * whether or not regression, binary classification, or multi-class * classification is supported. * @param loss the loss function to use */ public void setLoss(LossFunc loss) { this.loss = loss; } /** * Returns the loss function in use * @return the loss function in use */ public LossFunc getLoss() { return loss; } /** * &lambda;<sub>0</sub> controls the L<sub>2</sub> regularization penalty. 
* @param lambda0 the L<sub>2</sub> regularization penalty to use */ public void setLambda0(double lambda0) { if(lambda0 < 0 || Double.isNaN(lambda0) || Double.isInfinite(lambda0)) throw new IllegalArgumentException("Lambda0 must be non-negative, not " + lambda0); this.lambda0 = lambda0; } /** * Returns the L<sub>2</sub> regularization term in use * @return the L<sub>2</sub> regularization term in use */ public double getLambda0() { return lambda0; } /** * &lambda;<sub>1</sub> controls the L<sub>1</sub> regularization penalty. * @param lambda1 the L<sub>1</sub> regularization penalty to use */ public void setLambda1(double lambda1) { if(lambda1 < 0 || Double.isNaN(lambda1) || Double.isInfinite(lambda1)) throw new IllegalArgumentException("Lambda1 must be non-negative, not " + lambda1); this.lambda1 = lambda1; } /** * Returns the L<sub>1</sub> regularization term in use * @return the L<sub>1</sub> regularization term in use */ public double getLambda1() { return lambda1; } /** * Sets whether or not an implicit bias term will be added to the data set * @param useBias {@code true} to add an implicit bias term */ public void setUseBias(boolean useBias) { this.useBias = useBias; } /** * Returns whether or not an implicit bias term is in use * @return {@code true} if a bias term is in use */ public boolean isUseBias() { return useBias; } @Override public LinearSGD clone() { return new LinearSGD(this); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(!(loss instanceof LossC)) throw new FailedToFitException("Loss function " + loss.getClass().getSimpleName() + " only supports regression"); if(predicting.getNumOfCategories() == 2) { ws = new Vec[1]; bs = new double[1]; gus = new GradientUpdater[1]; } else { if(!(loss instanceof LossMC)) throw new FailedToFitException("Loss function " + loss.getClass().getSimpleName() + " only supports binary classification"); ws = new Vec[predicting.getNumOfCategories()]; bs = new double[predicting.getNumOfCategories()]; gus = new GradientUpdater[predicting.getNumOfCategories()]; } setUpShared(numericAttributes); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes) { if(!(loss instanceof LossR)) throw new FailedToFitException("Loss function " + loss.getClass().getSimpleName() + "does not support regression"); ws = new Vec[1]; bs = new double[1]; gus = new GradientUpdater[1]; setUpShared(numericAttributes); } private void setUpShared(int numericAttributes) { if(numericAttributes <= 0 ) throw new FailedToFitException("LinearSGD requires numeric features to use"); for(int i = 0; i < ws.length; i++) { ws[i] = new ScaledVector(new DenseVector(numericAttributes)); gus[i] = gradientUpdater.clone(); gus[i].setup(ws[i].length()); } time = 0; l1U = 0; if(lambda1 > 0) l1Q = new double[ws.length][ws[0].length()]; else l1Q = null; } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { final double eta_t = decay.rate(time++, eta); Vec x = dataPoint.getNumericalValues(); applyL2Reg(eta_t); //apply gradient updates if(ws.length == 1) { final double y = targetClass*2-1; final double lossD = ((LossC)loss).getDeriv(ws[0].dot(x)+bs[0], y); performGradientUpdate(0, eta_t, lossD, x); } else { Vec pred = new DenseVector(ws.length); for(int i = 0; i < ws.length; i++) pred.set(i, ws[i].dot(x)+bs[i]); ((LossMC)loss).process(pred, pred); ((LossMC)loss).deriv(pred, pred, targetClass); for(IndexValue iv : pred) { final int i = iv.getIndex(); final 
double lossD = iv.getValue(); performGradientUpdate(i, eta_t, lossD, x); } } applyL1Reg(eta_t, x); } /** * * @param i the index of the weight vector array to update * @param eta_t the learning rate to use * @param lossD the loss for the specified weight vector * @param x the input vector the loss was incurred on */ private void performGradientUpdate(final int i, final double eta_t, final double lossD, Vec x) { final Vec grad = new ScaledVector(lossD, x); if (useBias) bs[i] -= gus[i].update(ws[i], grad, eta_t, bs[i], lossD); else gus[i].update(ws[i], grad, eta_t); } @Override public void update(DataPoint dataPoint, double weight, double targetValue) { final double eta_t = decay.rate(time++, eta); Vec x = dataPoint.getNumericalValues(); applyL2Reg(eta_t); final double lossD = ((LossR)loss).getDeriv(ws[0].dot(x)+bs[0], targetValue); performGradientUpdate(0, eta_t, lossD, x); applyL1Reg(eta_t, x); } @Override public CategoricalResults classify(DataPoint data) { Vec x = data.getNumericalValues(); if(ws.length == 1) return ((LossC)loss).getClassification(ws[0].dot(x)+bs[0]); else { Vec pred = new DenseVector(ws.length); for(int i = 0; i < ws.length; i++) pred.set(i, ws[i].dot(x)+bs[i]); ((LossMC)loss).process(pred, pred); return ((LossMC)loss).getClassification(pred); } } @Override public double regress(DataPoint data) { Vec x = data.getNumericalValues(); return ((LossR)loss).getRegression(ws[0].dot(x)+bs[0]); } @Override public boolean supportsWeightedData() { return false; } /** * Applies L2 regularization to the model * @param eta_t the learning rate in use */ private void applyL2Reg(final double eta_t) { if(lambda0 > 0)//apply L2 regularization for(Vec v : ws) v.mutableMultiply(1-eta_t*lambda0); } /** * Applies L1 regularization to the model * @param eta_t the learning rate in use * @param x the input vector the update is from */ private void applyL1Reg(final double eta_t, Vec x) { //apply l1 regularization if(lambda1 > 0) { l1U += eta_t*lambda1;//line 6: in Tsuruoka et al paper, figure 2 for(int k = 0; k < ws.length; k++) { final Vec w_k = ws[k]; final double[] l1Q_k = l1Q[k]; for(IndexValue iv : x) { final int i = iv.getIndex(); //see "APPLYPENALTY(i)" on line 15: from Figure 2 in Tsuruoka et al paper final double z = w_k.get(i); double newW_i = 0; if (z > 0) newW_i = Math.max(0, z - (l1U + l1Q_k[i])); else if(z < 0) newW_i = Math.min(0, z + (l1U - l1Q_k[i])); l1Q_k[i] += (newW_i - z); w_k.set(i, newW_i); } } } } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { BaseUpdateableRegressor.trainEpochs(dataSet, this, getEpochs()); } @Override public Vec getRawWeight(int index) { return ws[index]; } @Override public double getBias(int index) { return bs[index]; } @Override public int numWeightsVecs() { return ws.length; } /** * Guess the distribution to use for the regularization term * {@link #setLambda0(double) &lambda;<sub>0</sub>} . * * @param d the data set to get the guess for * @return the guess for the &lambda;<sub>0</sub> parameter */ public static Distribution guessLambda0(DataSet d) { return new LogUniform(1e-7, 1e-2); } /** * Guess the distribution to use for the regularization term * {@link #setLambda0(double) &lambda;<sub>1</sub>} . * * @param d the data set to get the guess for * @return the guess for the &lambda;<sub>1</sub> parameter */ public static Distribution guessLambda1(DataSet d) { int N = d.size(); return new LogUniform(1e-7/N, 1e-3/N); } }
17,002
30.255515
140
java
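A minimal usage sketch (not part of the JSAT source) for the LinearSGD class in the record above. The constructor and HingeLoss come straight from the class and its imports; the dataset construction, setEpochs, and the inherited train(ClassificationDataSet) epoch driver from BaseUpdateableClassifier are assumptions about JSAT's usual API. Per the class javadoc, the L1 term is not normalized by the data set size, so the intended per-example penalty is divided by N here.

import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.linear.LinearSGD;
import jsat.linear.DenseVector;
import jsat.lossfunctions.HingeLoss;

// Hypothetical demo class, not from the JSAT repository
public class LinearSGDDemo
{
    public static void main(String[] args)
    {
        int N = 500;
        ClassificationDataSet data = new ClassificationDataSet(2, new CategoricalData[0], new CategoricalData(2));
        for (int i = 0; i < N; i++)
        {
            double x0 = Math.sin(i), x1 = Math.cos(i);
            data.addDataPoint(new DenseVector(new double[]{x0, x1}), x0 + x1 > 0 ? 1 : 0);
        }

        // Hinge loss gives SVM-style training; lambda1 is scaled by N as the javadoc advises
        double wantedL1 = 1e-4;
        LinearSGD sgd = new LinearSGD(new HingeLoss(), 1e-4, wantedL1 / N);
        sgd.setEpochs(10);   // assumed inherited from BaseUpdateableClassifier
        sgd.train(data);     // assumed inherited epoch-based trainer

        System.out.println("weights: " + sgd.getRawWeight(0) + ", bias: " + sgd.getBias(0));
    }
}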
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/LinearTools.java
package jsat.classifiers.linear; import java.util.List; import jsat.classifiers.ClassificationDataSet; import jsat.linear.DenseVector; import jsat.linear.Vec; import static java.lang.Math.*; import jsat.linear.IndexValue; /** * This class provides static helper methods that may be useful for various * linear models. * * @author Edward Raff */ public class LinearTools { private LinearTools() { } /** * If the linear model performs logistic regression regularized by &lambda; * ||w||<sub>1</sub>, this method computes the smallest value of lambda that * produces a weight vector of all zeros.<br> * <br> * Note, that the value returned depends on the data set size. If being used * to initialize the value of &lambda; for cross validation with k-folds, * the value (k-1)/k * &lambda; will be closer to the correct value of * &lambda; for each CV set. * * @param cds the data set that the model would be trained from * @return the smallest value of &lambda; that should produce all zeros. */ public static double maxLambdaLogisticL1(ClassificationDataSet cds) { /** * This code was ripped out/modified from NewGLMNET. It follows the * strategy laid out in Schmidt, M., Fung, G.,&amp;Rosaless, R. (2009). * Optimization Methods for L1-Regularization. Retrieved from * http://www.cs.ubc.ca/cgi-bin/tr/2009/TR-2009-19.pdf , where we use * the coordinate with the largest magnitude of the gradient */ /** * if w=0, then D_part[i] = 0.5 for all i */ final double D_part_i = 0.5; final int n = cds.getNumNumericalVars(); Vec delta_L = new DenseVector(n); if(cds.rowMajor()) { List<Vec> X = cds.getDataVectors(); for (int i = 0; i < X.size(); i++) { double y_i = cds.getDataPointCategory(i) * 2 - 1; Vec x = X.get(i); delta_L.mutableAdd(D_part_i * y_i, x); } } else { Vec[] cols = cds.getNumericColumns(); for(int j = 0; j < cds.getNumNumericalVars(); j++) { Vec X_j = cols[j]; for(IndexValue iv : X_j) { int i = iv.getIndex(); double y_i = cds.getDataPointCategory(i) * 2 - 1; delta_L.increment(j, D_part_i*y_i*iv.getValue()); } } } return max(abs(delta_L.max()), abs(delta_L.min())) / (cds.size()); } /** * Many linear classifiers can be phrased in two equivalent forms, that only change the notation for the regularized. These forms are:<br> * C <big>&sum;</big><sub>i</sub> &#x2113;(w,x<sub>i</sub>) + &Omega;(w) <br> * and<br> * 1/N <big>&sum;</big><sub>i</sub> &#x2113;(w,x<sub>i</sub>) + &lambda; &Omega;(w) <br> * * This method converts the regularization parameter &lambda; to the form used as C * * @param lambda the regularization parameter &lambda; * @param N the number of data points in the training set * @return */ public static double lambda2C(double lambda, double N) { return 1/(lambda*N); } /** * Many linear classifiers can be phrased in two equivalent forms, that only change the notation for the regularized. These forms are:<br> * C <big>&sum;</big><sub>i</sub> &#x2113;(w,x<sub>i</sub>) + &Omega;(w) <br> * and<br> * 1/N <big>&sum;</big><sub>i</sub> &#x2113;(w,x<sub>i</sub>) + &lambda; &Omega;(w) <br> * * This method converts the regularization parameter C to the form used as &lambda; * * @param C the regularization parameter C * @param N the number of data points in the training set * @return */ public static double c2Lambda(double C, double N) { return 1/(C*N); } }
4,001
34.105263
142
java
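A small worked example (not part of the JSAT source) of the two regularization parameterizations that the LinearTools record above converts between: C = 1/(lambda*N) and lambda = 1/(C*N), so the two helpers are inverses of one another. The numbers are illustrative only.

import jsat.classifiers.linear.LinearTools;

// Hypothetical demo class, not from the JSAT repository
public class LinearToolsDemo
{
    public static void main(String[] args)
    {
        double lambda = 1e-3;   // penalty in the (1/N) * sum of losses + lambda * Omega(w) form
        double N = 5000;        // number of training points

        double C = LinearTools.lambda2C(lambda, N);   // 1/(1e-3 * 5000) = 0.2
        double back = LinearTools.c2Lambda(C, N);     // recovers 1e-3

        System.out.println("C = " + C + ", lambda recovered = " + back);
    }
}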
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/LogisticRegressionDCD.java
package jsat.classifiers.linear; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.SingleWeightVectorModel; import jsat.classifiers.*; import jsat.classifiers.svm.PlattSMO; import jsat.distributions.Distribution; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.lossfunctions.LogisticLoss; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.utils.IntList; import jsat.utils.ListUtils; /** * This provides an implementation of regularized logistic regression using Dual * Coordinate Descent. This algorithm works well on both dense and sparse large * data sets. * <br><br> * The regularized problem is of the form:<br> * C <big>&Sigma;</big> log(1+exp(-y<sub>i</sub>w<sup>T</sup>x<sub>i</sub>)) + w<sup>T</sup>w/2 * <br><br> * See:<br> * Yu, H.-F., Huang, F.-L.,&amp;Lin, C.-J. (2010). <i>Dual Coordinate Descent * Methods for Logistic Regression and Maximum Entropy Models</i>. Machine * Learning, 85(1-2), 41–75. doi:10.1007/s10994-010-5221-8 * * @author Edward Raff */ public class LogisticRegressionDCD implements Classifier, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = -5813704270903243462L; private static final double eps_1 = 1e-3; private static final double eps_2 = 1e-8; private Vec w; private double bias; private boolean useBias; private double C; private int maxIterations; /** * Creates a new Logistic Regression learner that does no more than 100 * training iterations with a default regularization tradeoff of C = 1 */ public LogisticRegressionDCD() { this(1.0); } /** * Creates a new Logistic Regression learner that does no more than 100 * training iterations. * @param C the regularization tradeoff term */ public LogisticRegressionDCD(double C) { this(C, 100); } /** * Creates a new Logistic Regression learner * @param C the regularization tradeoff term * @param maxIterations the maximum number of iterations through the data set */ public LogisticRegressionDCD(double C, int maxIterations) { setC(C); setMaxIterations(maxIterations); } /** * Copy constructor * @param toCopy the object to copy */ protected LogisticRegressionDCD(LogisticRegressionDCD toCopy) { this(toCopy.C, toCopy.maxIterations); if(toCopy.w != null) this.w = toCopy.w.clone(); this.bias = toCopy.bias; this.useBias = toCopy.useBias; } /** * Sets the regularization trade-off term. larger values reduce the amount * of regularization, and smaller values increase the regularization. * * @param C the positive regularization tradeoff value */ public void setC(double C) { if(C <= 0 || Double.isInfinite(C) || Double.isNaN(C)) throw new IllegalArgumentException("C must be a positive constant, not " + C); this.C = C; } /** * Returns the regularization tradeoff parameter * @return the regularization tradeoff parameter */ public double getC() { return C; } /** * Sets the maximum number of iterations the algorithm is allowed to run * for. 
* @param maxIterations the maximum number of iterations */ public void setMaxIterations(int maxIterations) { if(maxIterations < 1) throw new IllegalArgumentException("iterations must be a positive value, not " + maxIterations); this.maxIterations = maxIterations; } /** * Returns the maximum number of iterations the algorithm is allowed to run * @return the maximum number of iterations the algorithm is allowed to run */ public int getMaxIterations() { return maxIterations; } /** * Sets whether or not an implicit bias term should be added to the model. * @param useBias {@code true} to add a bias term, {@code false} to exclude * the bias term. */ public void setUseBias(boolean useBias) { this.useBias = useBias; } /** * Returns {@code true} if a bias term is in use, {@code false} otherwise. * @return {@code true} if a bias term is in use, {@code false} otherwise. */ public boolean isUseBias() { return useBias; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return bias; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public CategoricalResults classify(DataPoint data) { return LogisticLoss.classify(w.dot(data.getNumericalValues())+bias); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { if(dataSet.getClassSize() != 2) throw new FailedToFitException("Logistic Regression is a binary classifier, can can not handle " + dataSet.getClassSize() + " class problems"); final int N = dataSet.size(); List<Vec> x = dataSet.getDataVectors(); double[] alpha = new double[N]; double[] alphaPrime = new double[N]; double[] Q_ii = new double[N]; int[] y = new int[N]; /* * All points start will small eps, because LR dosn't tend to zero out * coefficients. But we expect a few alphas to quickly go to larger * values. */ Arrays.fill(alpha, Math.min(eps_1*C, eps_2)); Arrays.fill(alphaPrime, C-alpha[0]); w = new DenseVector(dataSet.getNumNumericalVars()); bias = 0; for(int i = 0; i < N; i++) { y[i] = dataSet.getDataPointCategory(i)*2-1; Vec x_i = x.get(i); Q_ii[i] = x_i.dot(x_i); w.mutableAdd(alpha[0]*y[i], x_i);//all alpha are the same right now if(useBias) bias += alpha[0]*y[i]; } IntList permutation = new IntList(N); ListUtils.addRange(permutation, 0, N, 1); for(int iter = 0; iter < maxIterations; iter++) { Collections.shuffle(permutation); double maxChange = 0; for(int i : permutation) { Vec x_i = x.get(i); //Step 1. final double c1 = alpha[i], c2 = alphaPrime[i]; double a = Q_ii[i], b = y[i] * (w.dot(x_i) + bias); double z_m = (c2 - c1) / 2, s = c1 + c2; boolean case1 = z_m >= -b / a; double z;//see eq (35) if (case1) { if (c1 >= s / 2) z = 0.1 * c1; else z = c1; } else { if (c2 >= s / 2) z = 0.1 * c2; else z = c2; } //what if z is very small? Leave it alone.. if(z < 1e-20) continue; //Step 2. //Algorithm 4 solving equation (18) //would it really take more than 100 iterations? 
for(int subIter = 0; subIter < 100; subIter++) { double gP = Math.log(z/(C-z)); if(case1) gP += a*(z-c1)+ b; else gP += a*(z-c2)-b; //check if "0" if(Math.abs(gP) < 1e-6) break; double gPP= a + s/(z*(s-z)); double d = -gP/gPP; if(z + d <= 0) z *= 0.1;//unspecified shrinkage term: just use 0.1 else z += d; } //Step 4. alpha_i = Z1, alpha'_i = Z2. if(case1) { alpha[i] = z; alphaPrime[i] = C-z; } else { alpha[i] = C-z; alphaPrime[i] = z; } //Step 3. w = w + (Z1 −alpha_i) yi xi double change = (alpha[i]-c1); w.mutableAdd(change*y[i], x_i); if(useBias) bias += change*y[i]; maxChange = Math.max(maxChange, change); } //Convergence check if(Math.abs(maxChange) < 1e-4) return; } } @Override public boolean supportsWeightedData() { return false; } @Override public LogisticRegressionDCD clone() { return new LogisticRegressionDCD(this); } /** * Returns the weight vector used to compute results via a dot product. <br> * Do not modify this value, or you will alter the results returned. * @return the learned weight vector for prediction */ public Vec getWeightVec() { return w; } /** * Guess the distribution to use for the regularization term * {@link #setC(double) C} in Logistic Regression. * * @param d the data set to get the guess for * @return the guess for the C parameter */ public static Distribution guessC(DataSet d) { return PlattSMO.guessC(d); } }
10,432
28.22409
155
java
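A minimal usage sketch (not part of the JSAT source) for the LogisticRegressionDCD class in the record above. The constructor, train, classify and getWeightVec calls are all visible in the class; the dataset and DataPoint construction are assumptions about JSAT's usual API. The learner handles strictly binary problems.

import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.linear.LogisticRegressionDCD;
import jsat.linear.DenseVector;

// Hypothetical demo class, not from the JSAT repository
public class LogisticRegressionDCDDemo
{
    public static void main(String[] args)
    {
        ClassificationDataSet data = new ClassificationDataSet(2, new CategoricalData[0], new CategoricalData(2));
        for (int i = 0; i < 300; i++)
        {
            double x0 = (i % 30) / 30.0, x1 = (i % 11) / 11.0;
            data.addDataPoint(new DenseVector(new double[]{x0, x1}), x0 - x1 > 0 ? 1 : 0);
        }

        // C trades the logistic loss off against the L2 penalty; at most 100 dual CD passes
        LogisticRegressionDCD lr = new LogisticRegressionDCD(1.0, 100);
        lr.train(data);

        DataPoint query = new DataPoint(new DenseVector(new double[]{0.8, 0.2}));
        System.out.println("P(class 1) = " + lr.classify(query).getProb(1));
        System.out.println("learned weights: " + lr.getWeightVec());
    }
}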
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/NHERD.java
package jsat.classifiers.linear; import java.util.List; import jsat.DataSet; import jsat.SingleWeightVectorModel; import jsat.classifiers.BaseUpdateableClassifier; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.DataPoint; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.linear.Matrix; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; /** * Implementation of the Normal Herd (NHERD) algorithm for learning a linear * binary classifier. It is related to both {@link AROW} and the PA-II variant * of {@link PassiveAggressive}. <br> * Unlike similar algorithms, several methods of using only the diagonal values * of the covariance are available. * <br> * NOTE: This implementation does not add an implicit bias term, so the solution * goes through the origin * <br><br> * See:<br> * Crammer, K.,&amp;Lee, D. D. (2010). <i>Learning via Gaussian Herding</i>. * Pre-proceeding of NIPS (pp. 451–459). Retrieved from * <a href="http://webee.technion.ac.il/Sites/People/koby/publications/gaussian_mob_nips10.pdf"> * here</a> * * @author Edward Raff */ public class NHERD extends BaseUpdateableClassifier implements BinaryScoreClassifier, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = -1186002893766449917L; private Vec w; /** * Full covariance matrix */ private Matrix sigmaM; /** * Diagonal only covariance matrix */ private Vec sigmaV; private CovMode covMode; private double C; /** * Temp vector used to store Sigma * x_t */ private Vec Sigma_xt; /** * Sets what form of covariance matrix to use */ public static enum CovMode { /** * Use the full covariance matrix */ FULL, /** * Standard diagonal method, only the diagonal values get updated by * dropping the other terms. */ DROP, /** * Creates the diagonal by dropping the terms of the inverse of the * covariance matrix that is used to perform the update. This authors * suggest this is usually the best for diagonal covariance matrices * from empirical testing. */ PROJECT, /** * Creates the diagonal by solving the derivative with respect to the * specific objective function of NHERD */ EXACT } /** * Creates a new NHERD learner * @param C the aggressiveness parameter * @param covMode how to form the covariance matrix * @see #setC(double) * @see #setCovMode(jsat.classifiers.linear.NHERD.CovMode) */ public NHERD(double C, CovMode covMode) { setC(C); setCovMode(covMode); } /** * Copy constructor * @param other the object to copy */ protected NHERD(NHERD other) { this.C = other.C; this.covMode = other.covMode; if(other.w != null) this.w = other.w.clone(); if(other.sigmaM != null) this.sigmaM = other.sigmaM.clone(); if(other.sigmaV != null) this.sigmaV = other.sigmaV.clone(); if(other.Sigma_xt != null) this.Sigma_xt = other.Sigma_xt.clone(); } /** * Set the aggressiveness parameter. Increasing the value of this parameter * increases the aggressiveness of the algorithm. It must be a positive * value. 
This parameter essentially performs a type of regularization on * the updates * * @param C the positive aggressiveness parameter */ public void setC(double C) { if(Double.isNaN(C) || Double.isInfinite(C) || C <= 0) throw new IllegalArgumentException("C must be a postive constant, not " + C); this.C = C; } /** * Returns the aggressiveness parameter * @return the aggressiveness parameter */ public double getC() { return C; } /** * Sets the way in which the covariance matrix is formed. If using the full * covariance matrix, rank-1 updates mean updates to the model take * <i>O(d<sup>2</sup>)</i> time, where <i>d</i> is the dimension of the * input. Runtime can be reduced by using only the diagonal of the matrix to * perform updates in <i>O(s)</i> time, where <i>s &le; d</i> is the number * of non-zero values in the input * * @param covMode the way to form the covariance matrix */ public void setCovMode(CovMode covMode) { this.covMode = covMode; } /** * Returns the mode for forming the covariance * @return the mode for forming the covariance */ public CovMode getCovMode() { return covMode; } /** * Returns the weight vector used to compute results via a dot product. <br> * Do not modify this value, or you will alter the results returned. * @return the learned weight vector for prediction */ public Vec getWeightVec() { return w; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return 0; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public NHERD clone() { return new NHERD(this); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(numericAttributes <= 0) throw new FailedToFitException("AROW requires numeric attributes to perform classification"); else if(predicting.getNumOfCategories() != 2) throw new FailedToFitException("AROW is a binary classifier"); w = new DenseVector(numericAttributes); Sigma_xt = new DenseVector(numericAttributes); if(covMode != CovMode.FULL) { sigmaV = new DenseVector(numericAttributes); sigmaV.mutableAdd(1); } else sigmaM = Matrix.eye(numericAttributes); } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { Vec x_t = dataPoint.getNumericalValues(); double y_t = targetClass*2-1; double pred = x_t.dot(w); if(y_t*pred > 1) return;//No update needed //else, wrong label or margin too small double alpha; if(covMode != CovMode.FULL) { alpha = 0; //Faster to set only the needed final values for (IndexValue iv : x_t) { double x_ti = iv.getValue(); alpha += x_ti * x_ti * sigmaV.get(iv.getIndex()); } } else { sigmaM.multiply(x_t, 1, Sigma_xt); alpha = x_t.dot(Sigma_xt); } final double loss = Math.max(0, 1 - y_t * pred); final double w_c = y_t * loss / (alpha + 1 / C); if (covMode == CovMode.FULL) w.mutableAdd(w_c, Sigma_xt); else for (IndexValue iv : x_t) w.increment(iv.getIndex(), w_c * iv.getValue() * sigmaV.get(iv.getIndex())); double numer = C*(C*alpha+2); double denom = (1+C*alpha)*(1+C*alpha); switch (covMode) { case FULL: Matrix.OuterProductUpdate(sigmaM, Sigma_xt, Sigma_xt, -numer/denom); break; case DROP: final double c = -numer/denom; for (IndexValue iv : x_t) { int idx = iv.getIndex(); double x_ti = 
iv.getValue()*sigmaV.get(idx); sigmaV.increment(idx, c*x_ti*x_ti); } break; case PROJECT: for(IndexValue iv : x_t)//only the nonzero values in x_t will cause a change in value { int idx = iv.getIndex(); double x_r = iv.getValue(); double S_rr = sigmaV.get(idx); sigmaV.set(idx, 1/(1/S_rr+numer*x_r*x_r)); } break; case EXACT: for(IndexValue iv : x_t)//only the nonzero values in x_t will cause a change in value { int idx = iv.getIndex(); double x_r = iv.getValue(); double S_rr = sigmaV.get(idx); sigmaV.set(idx, S_rr/(Math.pow(S_rr*x_r*x_r*C+1, 2))); } break; } //zero out temp space if(covMode == CovMode.FULL) Sigma_xt.zeroOut(); } @Override public CategoricalResults classify(DataPoint data) { if(w == null) throw new UntrainedModelException("Model has not yet been trained"); CategoricalResults cr = new CategoricalResults(2); double score = getScore(data); if(score < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { return w.dot(dp.getNumericalValues()); } @Override public boolean supportsWeightedData() { return false; } /** * Guess the distribution to use for the regularization term * {@link #setC(double) C} . * * @param d the data set to get the guess for * @return the guess for the C parameter */ public static Distribution guessC(DataSet d) { return new LogUniform(Math.pow(2, -4), Math.pow(2, 4));//from Exact Soft Confidence-Weighted Learning paper } }
10,601
28.780899
124
java
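A minimal usage sketch (not part of the JSAT source) for the NHERD class in the record above. The constructor, the CovMode values, classify and getScore are visible in the class; the dataset construction and the inherited train(ClassificationDataSet) from BaseUpdateableClassifier are assumptions about JSAT's usual API. Recall from the javadoc that no bias term is added, so the decision boundary passes through the origin.

import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.linear.NHERD;
import jsat.linear.DenseVector;

// Hypothetical demo class, not from the JSAT repository
public class NHERDDemo
{
    public static void main(String[] args)
    {
        ClassificationDataSet data = new ClassificationDataSet(2, new CategoricalData[0], new CategoricalData(2));
        for (int i = 0; i < 400; i++)
        {
            double x0 = Math.sin(i * 0.1), x1 = Math.cos(i * 0.1);
            data.addDataPoint(new DenseVector(new double[]{x0, x1}), x0 + x1 > 0 ? 1 : 0);
        }

        // PROJECT keeps a diagonal covariance, which the class javadoc notes the
        // authors found usually best among the diagonal options
        NHERD nherd = new NHERD(1.0, NHERD.CovMode.PROJECT);
        nherd.train(data);   // assumed inherited epoch-based trainer from BaseUpdateableClassifier

        DataPoint query = new DataPoint(new DenseVector(new double[]{0.7, 0.3}));
        System.out.println("score = " + nherd.getScore(query));
        System.out.println("predicted class = " + nherd.classify(query).mostLikely());
    }
}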
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/NewGLMNET.java
package jsat.classifiers.linear; import java.util.List; import java.util.concurrent.ExecutorService; import jsat.classifiers.*; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.utils.IntList; import jsat.utils.ListUtils; import static java.lang.Math.*; import java.util.*; import jsat.DataSet; import jsat.SimpleWeightVectorModel; import jsat.SingleWeightVectorModel; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.distributions.Uniform; import jsat.exceptions.FailedToFitException; import jsat.linear.*; import jsat.lossfunctions.LogisticLoss; import jsat.parameters.Parameter; import jsat.parameters.Parameter.WarmParameter; import jsat.parameters.Parameterized; import jsat.utils.IntSet; /** * NewGLMNET is a batch method for solving Elastic Net regularized Logistic * Regression problems of the form <br> * 0.5 * (1-&alpha;) ||w||<sub>2</sub> + &alpha; * ||w||<sub>1</sub> + C * * <big>&sum;</big><sup>N</sup><sub>i=1</sub> &#8467; (w<sup>T</sup> x<sub>i</sub> + b, y<sub>i</sub>). * <br> * <br> * For &alpha; = 1, this becomes pure Lasso / L<sub>1</sub> regularized Logistic * Regression. For &alpha; = 0, this becomes pure Ridge/ L<sub>2</sub> regularized * Logistic Regression, however better solvers such as * {@link LogisticRegressionDCD} are faster if using &alpha; = 0. <br> * The default behavior is to use &alpha;=1, and includes the bias term. * Including the bias term can take longer to train, but can also increase * sparsity for some problems. <br> * <br> * This algorithm can be warm started from any classifier implementing the * {@link SingleWeightVectorModel} interface. * <br> * <br> * See: * <ul> * <li>Yuan, G., Ho, C.-H.,&amp;Lin, C. (2012). <i>An improved GLMNET for * L1-regularized logistic regression</i>. Journal of Machine Learning Research, * 13, 1999–2030. doi:10.1145/2020408.2020421</li> * <li>King, R., Morgan, B. J. T., Gimenez, O., Brooks, S. P., Crc, H.,&amp;Raton, * B. (2010). <i>Regularization Paths for Generalized Linear Models via * Coordinate Descent</i>. Journal of Statistical Software, 36(1), 1–22.</li> * <li>Zou, H.,&amp;Hastie, T. (2005). <i>Regularization and variable selection * via the elastic net</i>. Journal of the Royal Statistical Society, Series B, * 67(2), 301–320. doi:10.1111/j.1467-9868.2005.00503.x</li> * </ul> * * @author Edward Raff */ public class NewGLMNET implements WarmClassifier, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = 4133368677783573518L; //TODO make these other fields configurable as well private static final double DEFAULT_BETA = 0.5; private static final double DEFAULT_V = 1e-12; private static final double DEFAULT_GAMMA = 0; private static final double DEFAULT_SIGMA = 0.01; /** * The default tolerance for training is {@value #DEFAULT_EPS}. */ public static final double DEFAULT_EPS = 1e-2; /** * The default number of outer iterations of the training algorithm is * {@value #DEFAULT_MAX_OUTER_ITER} . 
*/ public static final int DEFAULT_MAX_OUTER_ITER = 100; /** * Weight vector */ private Vec w; /** * Bias term */ private double b; private double beta = DEFAULT_BETA; private double v = DEFAULT_V; private double gamma = DEFAULT_GAMMA; private double sigma = DEFAULT_SIGMA; private double C; private double alpha; private int maxOuterIters = DEFAULT_MAX_OUTER_ITER; private double e_out = DEFAULT_EPS; private boolean useBias = true; /** * The maximum allowed line-search steps */ private int maxLineSearchSteps = 20; /** * Creates a new L<sub>1</sub> regularized Logistic Regression solver with * C = 1. */ public NewGLMNET() { this(1); } /** * Creates a new L<sub>1</sub> regularized Logistic Regression solver * @param C the regularization term */ public NewGLMNET(double C) { this(C, 1); } /** * Creates a new Elastic Net regularized Logistic Regression solver * @param C the regularization term * @param alpha the fraction of weight (in [0, 1]) to apply to L<sub>1</sub> * regularization instead of L<sub>2</sub> regularization. */ public NewGLMNET(double C, double alpha) { setC(C); setAlpha(alpha); } /** * Copy constructor * @param toCopy the object to copy */ protected NewGLMNET(NewGLMNET toCopy) { if(toCopy.w !=null) this.w = toCopy.w.clone(); this.b = toCopy.b; this.beta = toCopy.beta; this.v = toCopy.v; this.gamma = toCopy.gamma; this.sigma = toCopy.sigma; this.C = toCopy.C; this.e_out = toCopy.e_out; this.maxOuterIters = toCopy.maxOuterIters; this.alpha = toCopy.alpha; this.useBias = toCopy.useBias; } /** * Sets the regularization term, where smaller values indicate a larger * regularization penalty. * * @param C the positive regularization term */ @WarmParameter(prefLowToHigh = true) public void setC(double C) { if(C <= 0 || Double.isInfinite(C) || Double.isNaN(C)) throw new IllegalArgumentException("Regularization term C must be a positive value, not " + C); this.C = C; } /** * * @return the regularization term */ public double getC() { return C; } /** * Using &alpha; = 1 corresponds to pure L<sub>1</sub> regularization, and * &alpha; = 0 corresponds to pure L<sub>2</sub> regularization. Any value * in-between is then an Elastic Net regularization. * * @param alpha the value in [0, 1] for determining the regularization * penalty's interpolation between pure L<sub>2</sub> and L<sub>1</sub> * regularization. */ public void setAlpha(double alpha) { if(alpha < 0 || alpha > 1 || Double.isNaN(alpha)) throw new IllegalArgumentException("alpha must be in [0, 1], not " + alpha); this.alpha = alpha; } /*** * * @return the fraction of weight (in [0, 1]) to apply to L<sub>1</sub> * regularization instead of L<sub>2</sub> regularization. */ public double getAlpha() { return alpha; } /** * Sets the maximum number of training iterations for the algorithm, * specifically the outer loop as mentioned in the original paper. * {@value #DEFAULT_MAX_OUTER_ITER} is the default value used, and may need * to be increased for more difficult problems. * * @param maxOuterIters the maximum number of outer iterations */ public void setMaxIters(int maxOuterIters) { if(maxOuterIters < 1) throw new IllegalArgumentException("Number of training iterations must be positive, not " + maxOuterIters); this.maxOuterIters = maxOuterIters; } /** * * @return the maximum number of training iterations */ public int getMaxIters() { return maxOuterIters; } /** * Sets the tolerance parameter for convergence. Smaller values will be more * exact, but larger values will converge faster. 
The default value is * fairly exact at {@value #DEFAULT_EPS}, increasing it by an order of * magnitude can often be done without hurting accuracy. * * @param e_out the tolerance parameter. */ public void setTolerance(double e_out) { if(e_out <= 0 || Double.isNaN(e_out)) throw new IllegalArgumentException("convergence tolerance paramter must be positive, not " + e_out); this.e_out = e_out; } /** * * @return the convergence tolerance parameter */ public double getTolerance() { return e_out; } /** * Controls whether or not an un-regularized bias term is added to the * model. Using a bias term can increase runtime, especially in sparse data * sets, as each data point will have work done for the implicit bias term. * However the bias term is usually needed for small dimension problems, and * can improve the sparsity of the solution for higher dimensional problems. * * @param useBias {@code true} if an un-regularized bias term should be used * or {@code false} to not use any bias term. */ public void setUseBias(boolean useBias) { this.useBias = useBias; } /** * * @return {@code true} if an un-regularized bias term will be used * or {@code false} to not use any bias term. */ public boolean isUseBias() { return useBias; } @Override public CategoricalResults classify(DataPoint data) { return LogisticLoss.classify(w.dot(data.getNumericalValues())+b); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet, Classifier warmSolution, boolean parallel) { train(dataSet, warmSolution); } @Override public void train(ClassificationDataSet dataSet, Classifier warmSolution) { if(warmSolution instanceof SimpleWeightVectorModel) { SimpleWeightVectorModel swv = (SimpleWeightVectorModel) warmSolution; train(dataSet, swv.getRawWeight(0), swv.getBias(0), true); } else throw new FailedToFitException("Warm solution is not of a"); } @Override public void train(ClassificationDataSet dataSet) { train(dataSet, null, 0, false); } private void train(ClassificationDataSet dataSet, Vec w_init, double b_init, boolean useInit) { /* * The original NewGLMNET paper describes the algorithm as minimizing * f(w) = ||w||_1 + L(w), where L(w) is the logistic loss summed over * all the variables. To make adapation to elastic net easier, we define * f(w) = alpha ||w||_1 + L(w), where L(w) = (1-alpha) ||w||_2 + loss sum. * This way we keep all the framework for L_1 regularization and * shrinking, and just update the appropriate terms where necessary. */ //paper uses n= #features so we will follow their lead final int n = dataSet.getNumNumericalVars(); //l = # data points final int l = dataSet.size(); if(useInit) { w = new DenseVector(w_init); b = useBias ? b_init : 0; } else { w = new DenseVector(n); b = 0; } double first_M_bar = 0; double e_in = 1.0;//set later when first_M_bar is set double[] w_dot_x = new double[l]; double[] exp_w_dot_x = new double[l]; double[] exp_w_dot_x_plus_dx = new double[l]; /** * Used in the linear search step at the end */ double[] d_dot_x = new double[l]; /** * Contains the value 1/(1+e^(w^T x)). This is used in computing D and the partial derivatives. 
*/ double[] D_part = new double[l]; double[] D = new double[l]; /** * Stores the value H<sup>k</sup><sub>j,j</sub> computer at the start of each iteration */ double[] H = new double[n]; /** * Stores the value H<sup>k</sup><sub>j,j</sub> computer at the start of * each iteration for the bias term */ double H_bias = 0; /** * Stores the value &nambla; L<sub>j</sub> */ double[] delta_L = new double[n]; /** * The gradient value for the bias term */ double delta_L_bias = 0; float[] y = new float[l]; double w_norm_1; double w_norm_2; List<Vec> columnsOfX = new ArrayList<>(Arrays.asList(dataSet.getNumericColumns())); if(useInit) { //lets go through all columns to compure w_dot_x values. We could do a row-major version and get //w_dot_x[i] = w.dot(dataSet.getDataPoint(i).getNumericalValues())+b; //but we are a column major algorithm, so lets assume oru data is stored in a colum major fashion //First, add bias term to each value Arrays.fill(w_dot_x, b); //now add contributions one column/feature at a time for(int j = 0; j < n; j++) { Vec col_j = columnsOfX.get(j); double w_j = w.get(j); for(IndexValue iv : col_j) w_dot_x[iv.getIndex()] += w_j*iv.getValue(); } //now w_dot_x should be initailized correctly, and we can go through the rows to prep other values needed for(int i = 0; i < l; i++) { y[i] = dataSet.getDataPointCategory(i)*2-1; //this was computed in the above loop over columns //w_dot_x[i] = w.dot(dataSet.getDataPoint(i).getNumericalValues())+b; final double tmp = exp_w_dot_x_plus_dx[i] = exp_w_dot_x[i] = exp(w_dot_x[i]); final double D_part_i = D_part[i]= 1/(1+tmp); D[i] = tmp*D_part_i*D_part_i; } w_norm_1 = w.pNorm(1); w_norm_2 = w.pNorm(2); } else//w = 0 { for(int i = 0; i < l; i++) { y[i] = dataSet.getDataPointCategory(i)*2-1; w_dot_x[i] = 0.0; exp_w_dot_x_plus_dx[i] = exp_w_dot_x[i] = 1.0; D_part[i]= 0.5; D[i] = 0.25; } w_norm_1 = w.pNorm(1); w_norm_2 = w.pNorm(2); } /** * sum of all x_j values in the negative class. Used for ∇_j L in trick * from LIBLINEAR eq(44) */ double[] col_neg_class_sum = new double[n]; for(int j = 0; j < n; j++) { Vec vec = columnsOfX.get(j); for(IndexValue iv : vec) if(y[iv.getIndex()] == -1) col_neg_class_sum[j] += iv.getValue(); } /** * Sum of all x_j values in the negative class for the bias term. */ double col_neg_class_sum_bias = 0; if(useBias) { for(int i = 0; i < l; i++) if(y[i] == -1) col_neg_class_sum_bias++; } /** * weight for L_1 reg is alpha, so this will be the L_2 weight (1-alpha) */ final double l2w = (1-alpha); // { // double objVal = 0; // for(int i = 0; i < l; i++) // objVal += C*log(1+exp(-y[i]*w_dot_x[i])); // objVal += alpha*w_norm_1 + l2w*w_norm_2; // System.out.println("Start Obj Val: " + objVal); // } //algo 3 //Let M^out ← ∞ double M_out = Double.POSITIVE_INFINITY; Vec d = new DenseVector(n); double d_bias = 0; boolean prevLineSearchFail = false; for(int k = 0; k < maxOuterIters; k++)//For k = 1, 2, 3, . . . { //algo 3, Step 1. boolean[] J = new boolean[n]; Arrays.fill(J, true); double M = 0; double M_bar = 0; //algo 3, Step 2. for(int j = 0; j < J.length; j++) { double w_j = w.get(j); //2.1. 
Calculate H^k_{jj}, ∇_j L(w^k) and ∇^S_j f(w^k) double delta_j_L = 0; double deltaSqrd_L = 0; for(IndexValue x_i : columnsOfX.get(j)) { int i = x_i.getIndex(); double val = x_i.getValue(); delta_j_L += -val*D_part[i]; //eq(44) from LIBLINEAR paper , re-factored to avoid a division by using D_part deltaSqrd_L += val*val*D[i]; } delta_L[j] = delta_j_L = l2w*w_j + C*(delta_j_L + col_neg_class_sum[j]); //H^k from eq (19) /* * regular is C X^T D X, L2 just adds + I , but we are alreayd * doing + eps * I to make sure the gradient is there. So just * do the max of v and lambda_2 */ H[j] = C*deltaSqrd_L + max(v, l2w); double deltaS_j_fw; if(w_j > 0) deltaS_j_fw = delta_j_L+alpha; else if(w_j < 0) deltaS_j_fw = delta_j_L-alpha; else//w_j = 0 deltaS_j_fw = signum(delta_j_L)*max(abs(delta_j_L)-alpha, 0); //done with step 2, we have all the info //2.2. If w^k_j = 0 and |∇_j L(w^k)| < 1−M^out/l // outer-level shrinking //then J ←J\{j}. //else M ←max(M, |∇^S_j f(w^k)|) and M_bar ← M_bar +|∇^S_j f(w^k)| if(w_j == 0 && abs(delta_j_L) < alpha-M_out/l) // j_iter.remove(); J[j] = false;//remove J from working set else { M = max(M, abs(deltaS_j_fw)); M_bar += abs(deltaS_j_fw); } } if(useBias) { //2.1. Calculate H^k_{jj}, ∇_j L(w^k) and ∇^S_j f(w^k) double delta_j_L = 0; double deltaSqrd_L = 0; for(int i = 0; i < l ; i++)//all have an implicit bias term { delta_j_L += -D_part[i]; //eq(44) from LIBLINEAR paper , re-factored to avoid a division by using D_part deltaSqrd_L += D[i]; } delta_L_bias = delta_j_L = C*(delta_j_L + col_neg_class_sum_bias); //H^k from eq (19) , but dont need v * I since its the bias term H_bias = C*deltaSqrd_L + v; double deltaS_j_fw = delta_L_bias; M = max(M, abs(deltaS_j_fw)); M_bar += abs(deltaS_j_fw); } if (k == 0)//first run if (useInit)//we have some value of W already, e_in = first_M_bar = getM_Bar_for_w0(n, l, columnsOfX, col_neg_class_sum, col_neg_class_sum_bias); else//normal algo e_in = first_M_bar = M_bar; //algo 3, Step 3. 3. If M_bar ≤ eps_out , return w^k if(M_bar <= e_out*first_M_bar) break; //algo 3, Step 4. Let M_out ←M M_out = M; //algo 3, Step 5. Run algo 4 //START: Algorithm 4 Inner iterations of NewGLMNET with shrinking double M_in = Double.POSITIVE_INFINITY; //This code originally used an IntList/List for compact storage. But that made a problem for huge feature spaces due to O(d) removal cost per call of the iterator //thist first one is always done in same order, so switching to a linked hash set so that we get same order but O(1) removals. //T also becomes a linked hash map -> set so that we can enforce a new random iteration order for each loop // IntList T = new IntList(J); Set<Integer> T = Collections.newSetFromMap(new LinkedHashMap<>()); // T.addAll(J); for(int j = 0; j < J.length; j++) if(J[j]) T.add(j); int J_size = T.size();//record number of items in the working set d.zeroOut(); d_bias = 0; /** * Sometimes we see the |z| be very small over and over, so we stop * if we see it too many times in a row (which means we really * aren't making much progress) */ int smallZInARow = 0; for(int p = 0; p < 1000; p++)// inner iterations { //step 1. double m = 0, m_bar = 0; /** * Used to check if we aren't really making any progress */ double max_abs_z = 0; //Create a list and copy into it. Then shuffle that list to get a new ordering. Once done, clear out the original T and re-insert all the (shuffled) values. Gets us a random iter order with O(1) removals in the iterator. 
IntList T_shuffled = new IntList(T); Collections.shuffle(T_shuffled); T.clear(); T.addAll(T_shuffled); Iterator<Integer> T_iter = T.iterator(); final double dynRange = n*5.0/T.size();//used for dynamic clip, see below while(T_iter.hasNext())//step 2. { final int j = T_iter.next(); final double w_j = w.get(j); final double d_j = d.get(j); //from eq(16) //∇_j q^bar_k(d) = ∇_j L(w^k) + (∇^2 L(w^k) d)_j //∇^2_jj q^bar_k(d)=∇^2_{jj} L(w^k) double delta_qBar_j = 0; //first compute the (∇^2 L(w^k) d)_j portion //see after algo 2 before eq (17) for(IndexValue iv : columnsOfX.get(j)) { int i = iv.getIndex(); delta_qBar_j += iv.getValue()*D[i]*d_dot_x[i]; } delta_qBar_j *= C; //now add the part we know from before delta_qBar_j += delta_L[j]; /* * For L_2, use (A+B)C = AC + BC to modify ((lambda_2 * I + ∇_2 L(w))d)j * so we need to add lambda_2 * I d^{p, j}_j to the final * value. I * x = x, and we are taking the value of the j'th * coordinate, so we just have to add lambda_2 d_j */ delta_qBar_j += l2w*d_j; double deltaS_q_k_j; if(w_j + d_j > 0) deltaS_q_k_j = delta_qBar_j + alpha; else if(w_j + d_j < 0) deltaS_q_k_j = delta_qBar_j - alpha; else //w_j + d_j == 0 deltaS_q_k_j = signum(delta_qBar_j)*max(abs(delta_qBar_j)-alpha, 0); double deltaSqrd_q_jj = H[j]; if(w_j + d_j == 0 && abs(delta_qBar_j) < alpha - M_in/l) { T_iter.remove();//inner-level shrinking } else { m = max(m, abs(deltaS_q_k_j)); m_bar += abs(deltaS_q_k_j); double z; //find z by eq (9), our w_j is actuall w_j+d_j if(delta_qBar_j+alpha <= deltaSqrd_q_jj*(w_j+d_j)) z = -(delta_qBar_j+alpha)/deltaSqrd_q_jj; else if(delta_qBar_j-alpha >= deltaSqrd_q_jj*(w_j+d_j)) z = -(delta_qBar_j-alpha)/deltaSqrd_q_jj; else z = -(w_j+d_j); if(abs(z) < 1e-11) continue; /* * When everyone is active, clip the updates to a * smaller range - as we are going to have a lot of * changes going on and this might make steps far larger * than it should. When there are fewer active * dimensions, allow for more change */ z = min(max(z,-dynRange),dynRange); max_abs_z = max(max_abs_z, abs(z)); d.increment(j, z); //book keeping, see eq(17) for(IndexValue iv : columnsOfX.get(j)) d_dot_x[iv.getIndex()] += z*iv.getValue(); } } if(useBias) { //from eq(16) //∇_j q^bar_k(d) = ∇_j L(w^k) + (∇^2 L(w^k) d)_j //∇^2_jj q^bar_k(d)=∇^2_{jj} L(w^k) double delta_qBar_j = 0; //first compute the (∇^2 L(w^k) d)_j portion //see after algo 2 before eq (17) for(int i = 0; i < l; i++) delta_qBar_j += 1*D[i]*d_dot_x[i];//compiler will take out 1*, left just to remind us its the bias term delta_qBar_j *= C; //now add the part we know from before delta_qBar_j += delta_L_bias; double deltaS_q_k_j = delta_qBar_j; double deltaSqrd_q_jj = H_bias; m = max(m, abs(deltaS_q_k_j)); m_bar += abs(deltaS_q_k_j); double z = -delta_qBar_j/(deltaSqrd_q_jj); if (abs(z) > 1e-11) { z = min(max(z, -dynRange), dynRange); max_abs_z = max(max_abs_z, abs(z)); d_bias += z; //book keeping, see eq(17) for(int i = 0; i < l ; i++) d_dot_x[i] += z; } } boolean breakInnerLoopAnyway = false; if(max_abs_z == 0) breakInnerLoopAnyway = true; else if (max_abs_z <= 1e-6) { if(smallZInARow++ >= 3)//give it a few chances breakInnerLoopAnyway = true; } else if(max_abs_z <= 1e-3) { if(smallZInARow++ >= 30)//give it a lot chances breakInnerLoopAnyway = true; } else smallZInARow = 0;//reset, we are making progress! //step 3. if(m_bar <= e_in || breakInnerLoopAnyway) { if(T.size() == J_size) { /* * If at one outer iteration, the condition (26) holds * after only one cycle of n CD steps, then we reduce * e_in by 1/4. 
* That is, the program automatically adjusts e_in if it * finds that too few CD steps are conducted for * minimizing qk(d) */ if(p == 0) e_in /= 4; break; } else { T.clear(); // T.addAll(J); for(int j = 0; j < J.length; j++) if(J[j]) T.add(j); M_in = Double.POSITIVE_INFINITY; } } else M_in = m; } //END: Algorithm 4 Inner iterations of NewGLMNET with shrinking //algo 3, Step 6. Compute λ = max{1,β,β^2, . . . } such that λd satisfies (20) //Use the form of eq(45) from Aug2014 LIBLINEAR paper //get ||w+d||_1 and ∇L^T d in one loop together double wPd_norm_1 = w_norm_1; double wPd_norm_2 = w_norm_2; double delta_L_dot_d = 0; for(IndexValue iv: d) { final int j = iv.getIndex(); final double w_j = w.get(j); final double d_j = iv.getValue(); wPd_norm_1 -= abs(w_j); wPd_norm_1 += abs(w_j+d_j); wPd_norm_2 -= w_j*w_j; wPd_norm_2 += (w_j+d_j)*(w_j+d_j); delta_L_dot_d += d_j*delta_L[j]; } delta_L_dot_d += d_bias*delta_L_bias; final double breakCondition = sigma*(delta_L_dot_d + alpha*(wPd_norm_1-w_norm_1) + l2w*(wPd_norm_2-w_norm_2) ); double lambda = 1; int t = 0; double wPlambda_d_norm_1 = wPd_norm_1; double wPlambda_d_norm_2 = wPd_norm_2; while(t < maxLineSearchSteps)//we may want to adjust this as beta changes { //"For line search, we use the following form of the sufficient decrease condition" eq(45) from LIBLINEAR paper Aug 2014 double newTerm = 0; for(int i = 0; i < l; i++) { double exp_lamda_d_dot_x = exp(lambda*d_dot_x[i]); exp_w_dot_x_plus_dx[i] = exp_w_dot_x[i]*exp_lamda_d_dot_x; newTerm += log((exp_w_dot_x_plus_dx[i]+1)/(exp_w_dot_x_plus_dx[i]+exp_lamda_d_dot_x )); if(y[i] == -1) newTerm += lambda*d_dot_x[i]; } newTerm = l2w*(wPlambda_d_norm_2 - w_norm_2) +//l2 reg alpha*(wPlambda_d_norm_1 - w_norm_1) + //l1 reg C*newTerm;//loss if(newTerm <= lambda * breakCondition) break; //else lambda = pow(beta, ++t); //update norm wPlambda_d_norm_1 = w_norm_1; wPlambda_d_norm_2 = w_norm_2; for(IndexValue iv: d) { final double w_j = w.get(iv.getIndex()); final double lambda_d_j = lambda*iv.getValue(); wPlambda_d_norm_1 -= abs(w_j); wPlambda_d_norm_1 += abs(w_j+lambda_d_j); wPlambda_d_norm_2 -= w_j*w_j; wPlambda_d_norm_2 += (w_j+lambda_d_j)*(w_j+lambda_d_j); } } //if line search fails twice in a row, just quit if(t == maxLineSearchSteps)//this shouldn't happen unless we are having serious trouble improving our results if (prevLineSearchFail) break;//jsut finish. else prevLineSearchFail = true; else prevLineSearchFail = false; //algo 3, Step 7. 7. w^{k+1} = w^k +λ d. w.mutableAdd(lambda, d); b += lambda * d_bias; w_norm_1 = wPlambda_d_norm_1; w_norm_2 = wPlambda_d_norm_2; //and more book keeping //val from last line search is new w System.arraycopy(exp_w_dot_x_plus_dx, 0, exp_w_dot_x, 0, l); //(w+lambda d)^T x = w^T x + d^T x for(int i = 0; i < l; i++) { w_dot_x[i] += lambda*d_dot_x[i]; final double D_part_i = D_part[i]= 1/(1+exp_w_dot_x[i]); D[i] = exp_w_dot_x[i]*D_part_i*D_part_i; } Arrays.fill(d_dot_x, 0.0);//new d = 0, always // double objVal = 0; // for(int i = 0; i < l; i++) // objVal += C*log(1+exp(-y[i]*w_dot_x[i])); // objVal += alpha*w_norm_1 + l2w*w_norm_2; // System.out.println("Iter "+ k + " has New Obj Val: " + objVal); } } /** * When we perform a warm start, we want to train to the same point that we * would have if we had not done a warm start. But our stopping point is * based on the initial relative error. 
To get around that, this method * computes what the error would have been for the zero weight vector * @param n * @param l * @param columnsOfX * @param col_neg_class_sum * @param col_neg_class_sum_bias * @return the error for M_bar that would have been computed if we were using the zero weight vector */ private double getM_Bar_for_w0(int n, int l, List<Vec> columnsOfX, double[] col_neg_class_sum, double col_neg_class_sum_bias) { /** * if w=0, then D_part[i] = 0.5 for all i */ final double D_part_i = 0.5; //algo 3, Step 1. double M_bar = 0; //algo 3, Step 2. for(int j = 0; j < n; j++) { final double w_j = 0; //2.1. Calculate H^k_{jj}, ∇_j L(w^k) and ∇^S_j f(w^k) double delta_j_L = -columnsOfX.get(j).sum()*0.5; delta_j_L = /* (l2w * w_j) not needed b/c w_j=0*/ + C * (delta_j_L + col_neg_class_sum[j]); double deltaS_j_fw; //only the w_j = 0 case applies, b/c that is what this method is for! //w_j = 0 deltaS_j_fw = signum(delta_j_L) * max(abs(delta_j_L) - alpha, 0); //done with step 2, we have all the info M_bar += abs(deltaS_j_fw); } if (useBias) { //2.1. Calculate H^k_{jj}, ∇_j L(w^k) and ∇^S_j f(w^k) double delta_j_L = 0; for (int i = 0; i < l; i++)//all have an implicit bias term delta_j_L += -D_part_i; delta_j_L = C * (delta_j_L + col_neg_class_sum_bias); double deltaS_j_fw = delta_j_L; M_bar += abs(deltaS_j_fw); } return M_bar; } @Override public boolean supportsWeightedData() { return false; } @Override public NewGLMNET clone() { return new NewGLMNET(this); } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return b; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if(index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public boolean warmFromSameDataOnly() { return false; } /** * Guess the distribution to use for the trade off term term * {@link #setAlpha(double) (double) &alpha;} in Elastic Net regularization. * * @param d the data set to get the guess for * @return the guess for the &alpha; parameter */ public static Distribution guessAlpha(DataSet d) { //Would do [0, .75], but if you are doing to be so close to full L2 reg you should really be using a different solver return new Uniform(0.25, 0.75); } /** * Guess the distribution to use for the regularization term * {@link #setC(double) C} in Logistic Regression. * * @param d the data set to get the guess for * @return the guess for the C parameter */ public static Distribution guessC(DataSet d) { double maxLambda = LinearTools.maxLambdaLogisticL1((ClassificationDataSet) d); double minC = 1/(2*maxLambda*d.size()); return new LogUniform(minC*10, minC*1000); } }
36460
35.461
223
java
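A minimal usage sketch for the NewGLMNET record above, assuming the class implements JSAT's Classifier interface and exposes a no-argument constructor; setC, setAlpha, getRawWeight and getBias are taken from the record itself, while the demo class name, the parameter values, and the getProb(1) call are illustrative assumptions rather than part of the source.

import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.linear.NewGLMNET;
import jsat.linear.Vec;

public class NewGLMNETExample
{
    public static void demo(ClassificationDataSet train)
    {
        NewGLMNET model = new NewGLMNET(); //assumed no-argument constructor
        model.setC(1.0); //loss weight, see guessC in the record
        model.setAlpha(0.5); //elastic-net trade-off, see guessAlpha in the record
        model.train(train);
        Vec w = model.getRawWeight(); //learned sparse weight vector
        double b = model.getBias();
        CategoricalResults cr = model.classify(train.getDataPoint(0));
        System.out.println("learned " + w.length() + " weights, bias = " + b
                + ", P(y=1) = " + cr.getProb(1));
    }
}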
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/OWA.java
/* * This code contributed under the public domain. */ package jsat.classifiers.linear; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import java.util.stream.IntStream; import jsat.DataSet; import jsat.SimpleWeightVectorModel; import jsat.SingleWeightVectorModel; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.ClassificationModelEvaluation; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.WarmClassifier; import jsat.classifiers.evaluation.ClassificationScore; import jsat.classifiers.svm.DCDs; import jsat.datatransform.ProjectionTransform; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseMatrix; import jsat.linear.DenseVector; import jsat.linear.Matrix; import jsat.linear.Vec; import jsat.lossfunctions.LogisticLoss; import jsat.math.OnLineStatistics; import jsat.parameters.GridSearch; import jsat.parameters.Parameter.ParameterHolder; import jsat.parameters.Parameterized; import jsat.regression.RegressionDataSet; import jsat.regression.RegressionModelEvaluation; import jsat.regression.Regressor; import jsat.regression.WarmRegressor; import jsat.regression.evaluation.RegressionScore; import jsat.utils.SystemInfo; import jsat.utils.concurrent.ParallelUtils; import jsat.utils.random.RandomUtil; /** * This class implements a general purpose method for endowing any linear * classification or regression model with the ability to train in parallel * using multiple cores. It does this by training an independent model for each * core, and using a final "aggregating" model to determine how to combine the * results from each core.<br> * <br> * See: Izbicki, M., & Shelton, C. R. (2020). Distributed Learning of * Non-convex Linear Models with One Round of Communication. In ECML-PKDD (pp. * 197–212). * * @author Edward Raff */ public class OWA implements Classifier, Regressor, Parameterized, SingleWeightVectorModel, WarmClassifier { protected int min_points_per_core = 1000; protected int sample_multipler = 3; @ParameterHolder private SingleWeightVectorModel base_learner; private boolean estimate_cv_scores = false; private List<ClassificationScore> scores_c = new ArrayList<>(); private List<RegressionScore> scores_r = new ArrayList<>(); private List<OnLineStatistics> scores_stats = new ArrayList<>(); private boolean warmTraining = false; private List<SimpleWeightVectorModel> prev_solutions = null; protected Vec w; protected double bias; /** * Constructs a new OWA model. * * @param base_learner the classifier or regressor to use as the base linear model, which will be parallelized by OWA. 
*/ public OWA(SingleWeightVectorModel base_learner) { this.base_learner = base_learner; } /** * Copy constructor * @param toClone the object to copy */ public OWA(OWA toClone) { this.min_points_per_core = toClone.min_points_per_core; this.sample_multipler = toClone.sample_multipler; this.base_learner = (SingleWeightVectorModel) toClone.base_learner.clone(); this.estimate_cv_scores = toClone.estimate_cv_scores; if(toClone.w != null) { this.w = toClone.w.clone(); this.bias = toClone.bias; } this.scores_c = toClone.scores_c.stream().map(s->s.clone()).collect(Collectors.toList()); this.scores_r = toClone.scores_r.stream().map(s->s.clone()).collect(Collectors.toList()); this.scores_stats = toClone.scores_stats.stream().map(s->s.clone()).collect(Collectors.toList()); this.warmTraining = toClone.warmTraining; if(toClone.prev_solutions != null) { this.prev_solutions = toClone.prev_solutions.stream().map(s->s.clone()).collect(Collectors.toList()); } } /** * At a small incremental cost to training time, OWA can perform an * approximate cross-validation estimate of it's performance. K-fold cross * validation normally increases training time by a factor of K, but OWA can * estimate this for little additional work (often less than 10% additional * time). <br> * <br> * This is not done by default, but can be enabled with this * function. If enabled use {@link #addScore(jsat.classifiers.evaluation.ClassificationScore) * } or {@link #addScore(jsat.regression.evaluation.RegressionScore) } to * add scoring methods based on whether you are performing a classification * or regression problem respectively. * * @param estimate_cv_scores {@code true} to estimate CV performance while * training, or {@code false} to not perform the * additional work. */ public void setEstimateCV(boolean estimate_cv_scores) { this.estimate_cv_scores = estimate_cv_scores; } /** * * @return {@code true} is OWA will attempt to estimate cross-validation * results during training. */ public boolean issetEstimateCV() { return estimate_cv_scores; } /** * The OWA algorithm can only perform warm-started training if the base * algorithm given also supports warm-training, and so is not enabled by * default. If the base model does support warm training, and you want to * use warm-starts of OWA models, setting this to {@code true} will cause * the OWA model to keep internal copies of the previous intermediate * solutions, which increases overall memory use. These are used to * warm-start the sub-models of future OWA training runs, and the OWA code * will keep track of which subset of data it used for each processor in * order to avoid any data leakage across subsets. * * @param warmTraining {@code true} if you plan to do warm stars with OWA, * or {@code false} by default to not. */ public void setWarmTraining(boolean warmTraining) { this.warmTraining = warmTraining; } public boolean isWarmTraining() { return warmTraining; } /** * OWA can perform its own cross-validation estimates of performance. This * method adds a scoring method similar to * {@link ClassificationModelEvaluation#addScorer} to estimate performance. * * @param score */ public void addScore(ClassificationScore score) { scores_c.add(score); } /** * Gets the statistics associated with each score in one map. If no * estimated cross validation was performed, or no scores added, the result * will be an empty map. This method should be called only if OWA was trained on a classification problem. * * @return the result statistics for each score as a map. 
*/ public Map<ClassificationScore, OnLineStatistics> getScoreStatsC() { Map<ClassificationScore, OnLineStatistics> results = new HashMap<>(); for (int i = 0; i < Math.min(scores_c.size(), scores_stats.size()); i++) results.put(scores_c.get(i), scores_stats.get(i)); return results; } /** * Gets the statistics associated with each score in one map. If no * estimated cross validation was performed, or no scores added, the result * will be an empty map. This method should be called only if OWA was trained on a regression problem. * * @return the result statistics for each score as a map. */ public Map<RegressionScore, OnLineStatistics> getScoreStatsR() { Map<RegressionScore, OnLineStatistics> results = new HashMap<>(); for (int i = 0; i < Math.min(scores_r.size(), scores_stats.size()); i++) results.put(scores_r.get(i), scores_stats.get(i)); return results; } /** * OWA can perform its own cross-validation estimates of performance. This * method adds a scoring method similar to * {@link RegressionModelEvaluation#addScorer} to estimate performance. * @param score */ public void addScore(RegressionScore score) { scores_r.add(score); } private void trainWork(final int requested_cores, DataSet dataSet, boolean parallel, Object warmSolution) { final int d = dataSet.getNumFeatures(); final int N = dataSet.size(); /** * How many "machines" (read, cores) are we using */ final int m = requested_cores <= 0 ? Math.min(Math.min(SystemInfo.LogicalCores, dataSet.size()/min_points_per_core), d/2+1) : requested_cores; // System.out.println("Using " + m + " cores"); List<Object> warm_starts; if(warmTraining && warmSolution != null) { if(!(base_learner instanceof WarmClassifier || base_learner instanceof WarmRegressor)) throw new FailedToFitException("Base class " + base_learner.getClass().getSimpleName() + " can not be trained via warm starts"); warm_starts = new ArrayList<>(); //Is the warm start an OWA with a per-split solution? if(warmSolution instanceof OWA && ((OWA)warmSolution).prev_solutions != null) for(SimpleWeightVectorModel sol : ((OWA)warmSolution).prev_solutions) warm_starts.add(sol); else//its not, just use global solution as generic start { warm_starts.add(warmSolution); } while(warm_starts.size() < m)//padd out to the number of models we are going to train to simplify code warm_starts.add(warm_starts.get(warm_starts.size()-1));//all point to same obj so cheap } else warm_starts = null; List<? extends DataSet<? extends DataSet>> splits = dataSet.cvSet(m, RandomUtil.getRandom(m*dataSet.size()));//Using a deterministic random seed so that if the models use warm-starts we can get the warm-starts to re-use the same sub-splits // System.out.println("Training local models"); List<SimpleWeightVectorModel> erms = ParallelUtils.streamP(IntStream.range(0, splits.size()), parallel).mapToObj(i-> { DataSet<? extends DataSet> data = splits.get(i); // System.out.println("Training on " + data.size() + "/" + dataSet.size() + " local samples"); SimpleWeightVectorModel w_i = base_learner.clone(); Object warm_w_i = warm_starts == null ? 
null : warm_starts.get(i); if(w_i instanceof Classifier) { if(w_i instanceof WarmClassifier && warm_w_i != null) ((WarmClassifier)w_i).train((ClassificationDataSet)data, ((Classifier)warm_w_i), false); else ((Classifier)w_i).train((ClassificationDataSet) data); } else { if(w_i instanceof WarmClassifier && warm_w_i != null) ((WarmRegressor)w_i).train((RegressionDataSet)data, ((Regressor)warm_w_i), false); else ((Regressor)w_i).train((RegressionDataSet) data); } return w_i; }).collect(Collectors.toList()); if(warmTraining) this.prev_solutions = erms; // System.out.println("Sample & Project"); //Lets build out projection matrix & transform Matrix W = new DenseMatrix(m, d); Vec b = new DenseVector(m); for(int i = 0; i < m; i++) { SingleWeightVectorModel w_i = (SingleWeightVectorModel) erms.get(i); w_i.getRawWeight().copyToRow(W, i); b.set(i, w_i.getBias()); } ProjectionTransform t = new ProjectionTransform(W, b); //lets prepare the smaller sub-set of data Z that is used in round 2. double sub_sample_frac = Math.min(Math.max(sample_multipler*m/(double)d+40/(double)N, (m+40)*sample_multipler/(double)(N/m)), 1.0); //Sub-sample each split to get all the parts of Z_owa. Done this way so that we can do an easy CV estimate if desired. //TODO try replacing this with some kind of coreset selection based on trained models List<? extends DataSet<? extends DataSet>> Z_owa_splits = splits.parallelStream() .map(data -> { DataSet z_i = data.randomSplit(sub_sample_frac).get(0); // System.out.println("CV Estimate chunk contribution " + z_i.size() + " based on frac " + sub_sample_frac); if(!data.rowMajor())//orig col-major may have been OK, but we want row-major now { Iterator<DataPoint> orig_iter = data.getDataPointIterator(); int pos = 0; if(data instanceof ClassificationDataSet) { ClassificationDataSet new_data = new ClassificationDataSet(W.rows(), new CategoricalData[0], ((ClassificationDataSet)z_i).getPredicting()); while(orig_iter.hasNext()) new_data.addDataPoint(t.transform(orig_iter.next()), ((ClassificationDataSet)z_i).getDataPointCategory(pos++)); z_i = new_data; } else { RegressionDataSet new_data = new RegressionDataSet(W.rows(), new CategoricalData[0]); while(orig_iter.hasNext()) new_data.addDataPoint(t.transform(orig_iter.next()), ((RegressionDataSet)z_i).getTargetValue(pos++)); z_i = new_data; } } else//apply transform easy-peasy z_i.applyTransform(t); return (DataSet<? extends DataSet>) z_i; }) .collect(Collectors.toList()); if(estimate_cv_scores) { // System.out.println("CV Estimate Steps"); scores_stats.clear(); ParallelUtils.streamP(IntStream.range(0, m), true).forEach(id-> { //build a dataset Z_{-i} to have all the results from the other corpra, but this one SimpleWeightVectorModel Z_model; DataSet Z_owa_mi; if(dataSet instanceof ClassificationDataSet) { Z_owa_mi = ClassificationDataSet.comineAllBut((List<ClassificationDataSet>)Z_owa_splits, id); LogisticRegressionDCD lr = new LogisticRegressionDCD(); lr.setUseBias(false); Z_model = lr; } else { Z_owa_mi = RegressionDataSet.comineAllBut((List<RegressionDataSet>)Z_owa_splits, id); DCDs dcd = new DCDs(); dcd.setUseBias(false); Z_model = dcd; } //We need to remove the columns associated with model i's prediction. 
Lazy option, lets apply a transform that simply zeros out these values so that the index match up still Z_owa_mi.applyTransform(dp -> { Vec v = dp.getNumericalValues().clone(); v.set(id, 0); return new DataPoint(v); }); // System.out.println("\tUsing " + Z_owa_mi.size() + " samples to estimate mixing ratio"); //train a model on Z_{-i} SimpleWeightVectorModel cv_model; GridSearch rs = new GridSearch((Classifier)Z_model, 5); //lazy but OK b/c DCD is also a classifier. rs.setUseWarmStarts(true); //since each processor is doing it's own search sequentially, use warm-starts to speed up as much as we can rs.autoAddParameters(Z_owa_mi, 9); rs.setTrainModelsInParallel(false); rs.setTrainFinalModel(true); if(Z_owa_mi instanceof ClassificationDataSet) { rs.train((ClassificationDataSet) Z_owa_mi, false); cv_model = (SimpleWeightVectorModel) rs.getTrainedClassifier(); } else { rs.train((RegressionDataSet) Z_owa_mi, false); cv_model = (SimpleWeightVectorModel) rs.getTrainedRegressor(); } //make super sure we don't use anything from the current id, its held out! cv_model.getRawWeight(0).set(id, 0); Vec w_mi = new DenseVector(d); Vec b_mi = new DenseVector(1); accumulateUpdates(m, cv_model, w_mi, b_mi, W, b); if(Z_owa_mi instanceof ClassificationDataSet) { ClassificationDataSet cds = (ClassificationDataSet) splits.get(id); List<ClassificationScore> scores = scores_c.stream().map(s->s.clone()).collect(Collectors.toList()); for(ClassificationScore s : scores) s.prepare(cds.getPredicting()); int pos = 0; Iterator<DataPoint> iter = cds.getDataPointIterator(); Vec weights = cds.getDataWeights(); while(iter.hasNext()) { CategoricalResults result = LogisticLoss.classify(w_mi.dot(iter.next().getNumericalValues()) + b_mi.get(0)); for(ClassificationScore s : scores) s.addResult(result, cds.getDataPointCategory(pos), weights.get(pos)); pos++; } //record the result synchronized(scores_stats) { if(scores_stats.isEmpty()) for(ClassificationScore s : scores) scores_stats.add(new OnLineStatistics()); for(int i = 0; i < scores.size(); i++) scores_stats.get(i).add(scores.get(i).getScore()); } } else//regression case { RegressionDataSet rds = (RegressionDataSet) splits.get(id); List<RegressionScore> scores = scores_r.stream().map(s->s.clone()).collect(Collectors.toList()); int pos = 0; Iterator<DataPoint> iter = rds.getDataPointIterator(); Vec weights = rds.getDataWeights(); while(iter.hasNext()) { double result = w_mi.dot(iter.next().getNumericalValues()) + b_mi.get(0); for(RegressionScore s : scores) s.addResult(result, rds.getTargetValue(pos), weights.get(pos)); pos++; } //record the result synchronized(scores_stats) { if(scores_stats.isEmpty()) for(RegressionScore s : scores) scores_stats.add(new OnLineStatistics()); for(int i = 0; i < scores.size(); i++) scores_stats.get(i).add(scores.get(i).getScore()); } } }); } // System.out.println("Train Avg"); SimpleWeightVectorModel Z_model; DataSet Z_owa; if(dataSet instanceof ClassificationDataSet) { Z_owa = ClassificationDataSet.comineAllBut((List<ClassificationDataSet>)Z_owa_splits, -1); LogisticRegressionDCD lr = new LogisticRegressionDCD(); lr.setUseBias(false); Z_model = lr; // if(estimate_cv_scores) // { // System.out.println("CV Est:"); // for(int i = 0; i < scores_c.size(); i++) // { // System.out.println(scores_c.get(i).getName() + " : " + scores_stats.get(i).getMean()); // } // } } else { Z_owa = RegressionDataSet.comineAllBut((List<RegressionDataSet>)Z_owa_splits, -1); DCDs dcd = new DCDs(); dcd.setUseBias(false); Z_model = dcd; } // System.out.println("Final model 
features & sample size " + Z_owa.getNumFeatures() + " " + Z_owa.size()); GridSearch rs = new GridSearch((Classifier)Z_model, 5); //lazy but OK b/c DCD is also a classifier. rs.setUseWarmStarts(false);//B/c we will do parallel run rs.autoAddParameters(Z_owa, 9); rs.setTrainModelsInParallel(true); rs.setTrainFinalModel(true); if(Z_owa instanceof ClassificationDataSet) rs.train((ClassificationDataSet) Z_owa, parallel); else rs.train((RegressionDataSet) Z_owa, parallel); SimpleWeightVectorModel weight_model = (SimpleWeightVectorModel) rs.getTrainedClassifier(); Vec w_final = new DenseVector(d); Vec b_final = new DenseVector(1); accumulateUpdates(m, weight_model, w_final, b_final, W, b); this.w = w_final; this.bias = b_final.get(0); } /** * * @param m the number of models being combined * @param w_final the location to store the final averaged weight vector * @param w_i_weights_source the aggregating model used to determine how much each of the m sub-models contribute to the final answer * @param W matrix of all m model's weights * @param b_final the location to store the final averaged bias term * @param b the vector of all m model's bias terms */ private void accumulateUpdates(final int m, SimpleWeightVectorModel w_i_weights_source, Vec w_final, Vec b_final, Matrix W, Vec b) { Vec w_i_weights = w_i_weights_source.getRawWeight(0).clone(); if(w_i_weights.min() >= 0) w_i_weights.mutableDivide(w_i_weights.sum()); for(int i = 0; i < m; i++) { w_final.mutableAdd(w_i_weights.get(i), W.getRowView(i)); b_final.increment(0, w_i_weights.get(i) * b.get(i)); } } @Override public CategoricalResults classify(DataPoint data) { return LogisticLoss.classify(w.dot(data.getNumericalValues())+bias); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { Classifier c_base = (Classifier) base_learner; if(!parallel)//why are you doing this? { c_base.train(dataSet); this.w = base_learner.getRawWeight(); this.bias = base_learner.getBias(); return; } //OK, parallel time! trainWork(-1, dataSet, parallel, null); } @Override public boolean supportsWeightedData() { if (base_learner instanceof Classifier) return ((Classifier)base_learner).supportsWeightedData(); else return ((Regressor)base_learner).supportsWeightedData(); } @Override public double regress(DataPoint data) { return w.dot(data.getNumericalValues())+bias; } @Override public void train(RegressionDataSet dataSet, boolean parallel) { Regressor r_base = (Regressor) base_learner; if(!parallel)//why are you doing this? { r_base.train(dataSet); this.w = base_learner.getRawWeight(); this.bias = base_learner.getBias(); return; } trainWork(-1, dataSet, parallel, null); } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return bias; } @Override public OWA clone() { return new OWA(this); } @Override public boolean warmFromSameDataOnly() { if( base_learner instanceof WarmClassifier) return ((WarmClassifier)base_learner).warmFromSameDataOnly(); else if( base_learner instanceof WarmRegressor) return ((WarmRegressor)base_learner).warmFromSameDataOnly(); else return false; } @Override public void train(ClassificationDataSet dataSet, Classifier warmSolution, boolean parallel) { Classifier c_base = (Classifier) base_learner; if(!parallel)//why are you doing this? { c_base.train(dataSet); this.w = base_learner.getRawWeight(); this.bias = base_learner.getBias(); return; } //OK, parallel time! trainWork(-1, dataSet, parallel, warmSolution); } }
21628
33.998382
241
java
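A sketch of how the OWA class above might wrap a base linear learner to train in parallel; the constructor, setEstimateCV, addScore, train(dataset, parallel) and getScoreStatsC appear in the record, while using LogisticRegressionDCD as the base model, the Accuracy scorer, and the demo class name are assumptions for illustration.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.evaluation.Accuracy;
import jsat.classifiers.linear.LogisticRegressionDCD;
import jsat.classifiers.linear.OWA;

public class OWAExample
{
    public static void demo(ClassificationDataSet train)
    {
        OWA owa = new OWA(new LogisticRegressionDCD()); //base model assumed to satisfy SingleWeightVectorModel
        owa.setEstimateCV(true); //approximate cross-validation estimates while training
        owa.addScore(new Accuracy()); //assumed available ClassificationScore implementation
        owa.train(train, true); //parallel training is the point of OWA
        owa.getScoreStatsC().forEach((score, stats) ->
                System.out.println(score.getName() + " : " + stats.getMean()));
    }
}

As the record's train(ClassificationDataSet, boolean) shows, training without parallelism simply delegates to the base learner, so the wrapper only pays off when multiple cores are available.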
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/PassiveAggressive.java
package jsat.classifiers.linear; import java.util.List; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.SingleWeightVectorModel; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.Distribution; import jsat.distributions.Exponential; import jsat.distributions.LogUniform; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.regression.BaseUpdateableRegressor; import jsat.regression.RegressionDataSet; import jsat.regression.UpdateableRegressor; /** * An implementations of the 3 versions of the Passive Aggressive algorithm for * binary classification and regression. Its a type of online algorithm that * performs the minimal update necessary to correct for a mistake. * <br><br> * See:<br> * Crammer, K., Dekel, O., Keshet, J., Shalev-Shwartz, S.,&amp;Singer, Y. (2006). * <a href="http://dl.acm.org/citation.cfm?id=1248566"> * <i>Online passive-aggressive algorithms</i></a>. Journal of Machine Learning * Research, 7, 551–585. * * @author Edward Raff */ public class PassiveAggressive implements UpdateableClassifier, BinaryScoreClassifier, UpdateableRegressor, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = -7130964391528405832L; private int epochs; private double C = 0.01; private double eps = 0.001; private Vec w; private Mode mode; /** * Creates a new Passive Aggressive learner that does 10 epochs and uses * {@link Mode#PA1} */ public PassiveAggressive() { this(10, Mode.PA1); } /** * Creates a new Passive Aggressive learner * * @param epochs the number of training epochs to use during batch training * @param mode which version of the update to perform */ public PassiveAggressive(int epochs, Mode mode) { this.epochs = epochs; this.mode = mode; } /** * Controls which version of the Passive Aggressive update is used */ public static enum Mode { /** * The default Passive Aggressive algorithm, it performs correction * updates that make the minimal change necessary to correct the output * for a single input */ PA, /** * Limits the aggressiveness by reducing the maximum correction to the * {@link #setC(double) aggressiveness parameter} */ PA1, /** * Limits the aggressiveness by adding a constant factor to the * denominator of the correction. */ PA2 } /** * Set the aggressiveness parameter. Increasing the value of this parameter * increases the aggressiveness of the algorithm. It must be a positive * value. This parameter essentially performs a type of regularization on * the updates * <br> * An infinitely large value is equivalent to being completely aggressive, * and is performed when the mode is set to {@link Mode#PA}. * * @param C the positive aggressiveness parameter */ public void setC(double C) { if(Double.isNaN(C) || Double.isInfinite(C) || C <= 0) throw new ArithmeticException("Aggressiveness must be a positive constant"); this.C = C; } /** * Returns the aggressiveness parameter * @return the aggressiveness parameter */ public double getC() { return C; } /** * Sets which version of the PA update is used. * @param mode which PA update style to perform */ public void setMode(Mode mode) { this.mode = mode; } /** * Returns which version of the PA update is used * @return which PA update style is used */ public Mode getMode() { return mode; } /** * Sets the range for numerical prediction. 
If it is within range of the * given value, no error will be incurred. * @param eps the maximum acceptable difference in prediction and truth */ public void setEps(double eps) { this.eps = eps; } /** * Returns the maximum acceptable difference in prediction and truth * @return the maximum acceptable difference in prediction and truth */ public double getEps() { return eps; } /** * Sets the number of whole iterations through the training set that will be * performed for training * @param epochs the number of whole iterations through the data set */ public void setEpochs(int epochs) { if(epochs < 1) throw new IllegalArgumentException("epochs must be a positive value"); this.epochs = epochs; } /** * Returns the number of epochs used for training * @return the number of epochs used for training */ public int getEpochs() { return epochs; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return 0; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(2); if(getScore(data) > 0) cr.setProb(1, 1); else cr.setProb(0, 1); return cr; } @Override public double getScore(DataPoint dp) { return dp.getNumericalValues().dot(w); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { BaseUpdateableClassifier.trainEpochs(dataSet, this, epochs); } @Override public boolean supportsWeightedData() { return false; } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(predicting.getNumOfCategories() != 2) throw new FailedToFitException("Only supports binary classification problems"); else if(numericAttributes < 1) throw new FailedToFitException("only suppors learning from numeric attributes"); w = new DenseVector(numericAttributes); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes) { if(numericAttributes < 1) throw new FailedToFitException("only suppors learning from numeric attributes"); w = new DenseVector(numericAttributes); } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { Vec x = dataPoint.getNumericalValues(); final int y_t = targetClass*2-1; final double dot = x.dot(w); final double loss = Math.max(0, 1-y_t*dot); if(loss == 0) return; final double tau = getCorrection(loss, x); w.mutableAdd(y_t*tau, x); } @Override public void update(DataPoint dataPoint, double weight, double targetValue) { Vec x = dataPoint.getNumericalValues(); final double y_t = targetValue; final double y_p = x.dot(w); final double loss = Math.max(0, Math.abs(y_p-y_t)-eps); if(loss == 0) return; final double tau = getCorrection(loss, x); w.mutableAdd(Math.signum(y_t-y_p)*tau, x); } private double getCorrection(final double loss, Vec x) { final double xNorm = Math.pow(x.pNorm(2), 2); if(mode == Mode.PA1) return Math.min(C, loss/xNorm); else if(mode == Mode.PA2) return loss/(xNorm+1.0/(2*C)); else return loss/xNorm; } @Override public double regress(DataPoint data) { return w.dot(data.getNumericalValues()); } @Override public void 
train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { BaseUpdateableRegressor.trainEpochs(dataSet, this, epochs); } @Override public PassiveAggressive clone() { PassiveAggressive clone = new PassiveAggressive(epochs, mode); clone.eps = this.eps; clone.C = this.C; if(this.w != null) clone.w = this.w.clone()/*deep copy so the clone does not share the learned weight vector*/; return clone; } /** * Guess the distribution to use for the regularization term * {@link #setC(double) C} in PassiveAggressive. * * @param d the data set to get the guess for * @return the guess for the C parameter */ public static Distribution guessC(DataSet d) { return new LogUniform(0.001, 100); } }
9497
26.293103
146
java
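A short sketch of the PassiveAggressive record above used as a batch-trained binary classifier; the (epochs, Mode) constructor, the Mode enum, setC, train and classify are all shown in the record, while the demo class name, the parameter values, and the getProb(1) call are illustrative assumptions.

import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.linear.PassiveAggressive;

public class PassiveAggressiveExample
{
    public static void demo(ClassificationDataSet train)
    {
        //PA-I caps the correction step at C, which limits the damage from noisy labels
        PassiveAggressive pa = new PassiveAggressive(10, PassiveAggressive.Mode.PA1);
        pa.setC(0.01);
        pa.train(train); //runs the requested number of epochs of online updates
        CategoricalResults cr = pa.classify(train.getDataPoint(0));
        System.out.println("P(y=1) = " + cr.getProb(1));
    }
}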
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/ROMMA.java
package jsat.classifiers.linear; import jsat.SingleWeightVectorModel; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.DenseVector; import jsat.linear.Vec; /** * Provides an implementation of the linear Relaxed online Maximum Margin * algorithm, which finds a similar solution to SVMs. By default, the aggressive * variant with an implicit bias term is used, which is the suggested form from * the paper. It is a binary classifier. * <br><br> * See: Li, Y.,&amp;Long, P. M. (2002). <i>The Relaxed Online Maximum Margin * Algorithm</i>. Machine Learning, 46(1-3), 361–387. * doi:10.1023/A:1012435301888 * * @author Edward Raff */ public class ROMMA extends BaseUpdateableClassifier implements BinaryScoreClassifier, SingleWeightVectorModel { private static final long serialVersionUID = 8163937542627337711L; private boolean useBias = true; private boolean aggressive; private Vec w; private double bias; /** * Creates a new aggressive ROMMA classifier */ public ROMMA() { this(true); } /** * Creates a new ROMMA classifier * @param aggressive whether or not to use the aggressive variant */ public ROMMA(boolean aggressive) { setAggressive(aggressive); } /** * Copy constructor * @param other the ROMMA object to copy */ protected ROMMA(ROMMA other) { this.aggressive = other.aggressive; if(other.w != null) this.w = other.w; this.bias = other.bias; this.useBias = other.useBias; } @Override public ROMMA clone() { return new ROMMA(this); } /** * Determines whether the normal or aggressive ROMMA algorithm will be used. * @param aggressive {@code true} to use the aggressive variant */ public void setAggressive(boolean aggressive) { this.aggressive = aggressive; } /** * Returns whether or not the aggressive variant of ROMMA is used * @return {@code true} if the aggressive variant of ROMMA is used */ public boolean isAggressive() { return aggressive; } /** * Sets whether or not an implicit bias term will be added to the data set * @param useBias {@code true} to add an implicit bias term */ public void setUseBias(boolean useBias) { this.useBias = useBias; } /** * Returns whether or not an implicit bias term is in use * @return {@code true} if a bias term is in use */ public boolean isUseBias() { return useBias; } /** * Returns the weight vector used to compute results via a dot product. <br> * Do not modify this value, or you will alter the results returned. 
* @return the learned weight vector for prediction */ public Vec getWeightVec() { return w; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return bias; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(numericAttributes <= 0) throw new FailedToFitException("ROMMA requires numerical features"); else if(predicting.getNumOfCategories() != 2) throw new FailedToFitException("ROMMA only supports binary classification"); w = new DenseVector(numericAttributes); bias = 0; } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { Vec x = dataPoint.getNumericalValues(); double wx = w.dot(x)+bias; double y = targetClass*2-1; double pred = y*wx; if(pred < 1) { final double ww = w.dot(w); final double xx = x.dot(x); final double wwxx = ww*xx; if(aggressive) { if(pred >= wwxx) { w.zeroOut(); w.mutableAdd(y/xx, x); if(useBias) bias = y/xx; return; } } //else / normal double denom = wwxx - wx * wx; double c = (wwxx - pred) / denom; double d = (ww * (y - wx)) / denom; w.mutableMultiply(c); w.mutableAdd(d, x); if(useBias) bias = c*bias + d; } } @Override public CategoricalResults classify(DataPoint data) { if(w == null) throw new UntrainedModelException("Model has not been trained"); double wx = getScore(data); CategoricalResults cr = new CategoricalResults(2); if(wx < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { return w.dot(dp.getNumericalValues())+bias; } @Override public boolean supportsWeightedData() { return false; } }
5785
24.946188
113
java
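A sketch for the ROMMA record above; the aggressive constructor, setUseBias, classify and getScore appear in the record, while batch training through a train method inherited from BaseUpdateableClassifier, the demo class name, and the getProb(1) call are assumptions.

import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.linear.ROMMA;

public class ROMMAExample
{
    public static void demo(ClassificationDataSet train)
    {
        ROMMA romma = new ROMMA(true); //aggressive variant, the form suggested by the paper
        romma.setUseBias(true); //keep the implicit bias term
        romma.train(train); //assumed batch wrapper around the online update, via BaseUpdateableClassifier
        CategoricalResults cr = romma.classify(train.getDataPoint(0));
        System.out.println("margin score = " + romma.getScore(train.getDataPoint(0))
                + ", P(y=1) = " + cr.getProb(1));
    }
}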
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/SCD.java
package jsat.classifiers.linear; import java.util.*; import java.util.concurrent.ExecutorService; import jsat.SingleWeightVectorModel; import jsat.classifiers.*; import jsat.exceptions.UntrainedModelException; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.linear.Vec; import jsat.lossfunctions.*; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.regression.RegressionDataSet; import jsat.regression.Regressor; import jsat.utils.random.RandomUtil; import jsat.utils.random.XORWOW; /** * Implementation of Stochastic Coordinate Descent for L1 regularized * classification and regression. Which one is supported is controlled by the * {@link LossFunc} used. To be used the loss function must be twice * differentiable with a finite maximal second derivative value. * {@link LogisticLoss} for classification and {@link SquaredLoss} for * regression are the ones used in the original paper. * <br><br> * Because the SCD needs column major data for efficient implementation, a * second copy of data will be created in memory during training. * <br><br> * See: Shalev-Shwartz, S.,&amp;Tewari, A. (2009). <i>Stochastic Methods for * L<sub>1</sub>-regularized Loss Minimization</i>. In 26th International * Conference on Machine Learning (Vol. 12, pp. 929–936). Retrieved from * <a href="http://eprints.pascal-network.org/archive/00005418/">here</a> * * @author Edward Raff */ public class SCD implements Classifier, Regressor, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = 3576901723216525618L; private Vec w; private LossFunc loss; private double reg; private int iterations; /** * Creates anew SCD learner * * @param loss the loss function to use * @param regularization the regularization term to used * @param iterations the number of iterations to perform */ public SCD(LossFunc loss, double regularization, int iterations) { double beta = loss.getDeriv2Max(); if (Double.isNaN(beta) || Double.isInfinite(beta) || beta <= 0) throw new IllegalArgumentException("SCD needs a loss function with a finite positive maximal second derivative"); this.loss = loss; setRegularization(regularization); setIterations(iterations); } /** * Copy constructor * * @param toCopy the object to copy */ public SCD(SCD toCopy) { this(toCopy.loss.clone(), toCopy.reg, toCopy.iterations); if (toCopy.w != null) this.w = toCopy.w.clone(); } /** * Sets the number of iterations that will be used. * * @param iterations the number of training iterations */ public void setIterations(int iterations) { if(iterations < 1) throw new IllegalArgumentException("The iterations must be a positive value, not " + iterations); this.iterations = iterations; } /** * Returns the number of iterations used * * @return the number of iterations used */ public int getIterations() { return iterations; } /** * Sets the regularization constant used for learning. The regularization * must be positive, and the learning rate is proportional to the * regularization value. This means regularizations very near zero will take * a long time to converge. * * @param regularization the regularization to apply in (0, Infinity) */ public void setRegularization(double regularization) { if (Double.isInfinite(regularization) || Double.isNaN(regularization) || regularization <= 0) throw new IllegalArgumentException("Regularization must be a positive value"); this.reg = regularization; } /** * Returns the regularization parameter value used for learning. 
* * @return the regularization parameter value used for learning. */ public double getRegularization() { return reg; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return 0; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public CategoricalResults classify(DataPoint data) { if (w != null && loss instanceof LossC) return ((LossC) loss).getClassification(w.dot(data.getNumericalValues())); else throw new UntrainedModelException("Model was not trained with a classification function"); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { double[] targets = new double[dataSet.size()]; for (int i = 0; i < targets.length; i++) targets[i] = dataSet.getDataPointCategory(i) * 2 - 1; train(dataSet.getNumericColumns(), targets); } @Override public boolean supportsWeightedData() { return false; } @Override public double regress(DataPoint data) { if (w != null && loss instanceof LossR) return ((LossR) loss).getRegression(w.dot(data.getNumericalValues())); else throw new UntrainedModelException("Model was not trained with a classification function"); } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { train(dataSet.getNumericColumns(), dataSet.getTargetValues().arrayCopy()); } /** * * @param columns columns of the training matrix * @param y the target values */ private void train(Vec[] columns, double[] y) { final double beta = loss.getDeriv2Max(); double[] z = new double[y.length];///stores w.dot(x) w = new DenseVector(columns.length); Random rand = RandomUtil.getRandom(); for (int iter = 0; iter < iterations; iter++) { final int j = rand.nextInt(columns.length); double g = 0; for (IndexValue iv : columns[j]) g += loss.getDeriv(z[iv.getIndex()], y[iv.getIndex()]) * iv.getValue(); g /= y.length; final double w_j = w.get(j); final double eta; if (w_j - g / beta > reg / beta) eta = -g / beta - reg / beta; else if (w_j - g / beta < -reg / beta) eta = -g / beta + reg / beta; else eta = -w_j; w.increment(j, eta); for (IndexValue iv : columns[j]) z[iv.getIndex()] += eta * iv.getValue(); } } @Override public SCD clone() { return new SCD(this); } }
7382
28.770161
125
java
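A sketch for the SCD record above configured for L1-regularized logistic regression, the pairing used in the cited paper; the (loss, regularization, iterations) constructor, train and classify are shown in the record, while the no-argument LogisticLoss constructor, the regularization and iteration values, and the demo class name are assumptions.

import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.linear.SCD;
import jsat.lossfunctions.LogisticLoss;

public class SCDExample
{
    public static void demo(ClassificationDataSet train)
    {
        //each iteration updates a single coordinate, so the count should be much larger than the feature count
        SCD scd = new SCD(new LogisticLoss(), 1e-4, 100000);
        scd.train(train);
        CategoricalResults cr = scd.classify(train.getDataPoint(0));
        System.out.println("P(y=1) = " + cr.getProb(1));
    }
}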
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/SCW.java
package jsat.classifiers.linear; import jsat.classifiers.BaseUpdateableClassifier; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.DataPoint; import jsat.distributions.Normal; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.linear.Matrix; import jsat.linear.Vec; import static java.lang.Math.*; import java.util.List; import jsat.DataSet; import jsat.SingleWeightVectorModel; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.distributions.Uniform; import jsat.exceptions.UntrainedModelException; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; /** * Provides an Implementation of Confidence-Weighted (CW) learning and Soft * Confidence-Weighted (SCW), both of which are binary linear classifiers * inspired by {@link PassiveAggressive}. The SCW mode handles noisy and * nonlinearly separable datasets better. <br> * NOTE: Unlike other online second order methods, when using the full * covariance matrix, all new inputs cost O(d<sup>2</sup>) time to process, even * if update is needed. * <br> * NOTE: This implementation does not add an implicit bias term, so the solution * goes through the origin * <br><br> * See:<br> * <ul> * <li> * Crammer, K., Fern, M.,&amp;Pereira, O. (2008). <i>Exact Convex * Confidence-Weighted Learning</i>. In Advances in Neural Information * Processing Systems 22 (pp. 345–352). Retrieved from * <a href="http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.169.3364"> * here</a> * </li> * <li> * Wang, J., Zhao, P.,&amp;Hoi, S. C. H. (2012). <i>Exact Soft Confidence-Weighted * Learning</i>. ICML. Learning. Retrieved from * <a href="http://arxiv.org/abs/1206.4612">here</a> * </li> * </ul> * * @author Edward Raff */ public class SCW extends BaseUpdateableClassifier implements BinaryScoreClassifier, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = -6721377074407660742L; private double C = 1; private double eta; //all set when eta is set private double phi, phiSqrd, zeta, psi; private Mode mode; private Vec w; /** * Full covariance matrix */ private Matrix sigmaM; /** * Diagonal only covariance matrix */ private Vec sigmaV; /** * Temp vector used to store Sigma * x_t. 
Make sure the vector is zeroed out * before returning from update */ private Vec Sigma_xt; private boolean diagonalOnly = false; /** * More than one escape point, makes sure to zero out {@link #Sigma_xt} * using the input incase of sparseness * @param x_t */ private void zeroOutSigmaXt(final Vec x_t) { //Zero out temp store if(diagonalOnly && x_t.isSparse())//only these values will be non zero for(IndexValue iv : x_t) Sigma_xt.set(iv.getIndex(), 0.0); else Sigma_xt.zeroOut(); } /** * Which version of the algorithms shuld be used */ public static enum Mode { /** * The standard Confidence Weighted algorithm */ CW, /** * SCW-I which is strongly related to PA-I */ SCWI, /** * SCW-II, which is strongly related to PA-II */ SCWII } /** * Creates a new SCW learner */ public SCW() { this(0.5, Mode.SCWI, true); } /** * Creates a new SCW learner * @param eta the margin confidence parameter in [0.5, 1] * @param mode mode controlling which algorithm to use * @param diagonalOnly whether or not to use only the diagonal of the * covariance matrix * @see #setEta(double) * @see #setMode(jsat.classifiers.linear.SCW.Mode) * @see #setDiagonalOnly(boolean) */ public SCW(double eta, Mode mode, boolean diagonalOnly) { setEta(eta); setMode(mode); setDiagonalOnly(diagonalOnly); } /** * Copy constructor * @param other object to copy */ protected SCW(SCW other) { this.C = other.C; this.diagonalOnly = other.diagonalOnly; this.mode = other.mode; this.setEta(other.eta); if(other.w != null) this.w = other.w.clone(); if(other.sigmaM != null) this.sigmaM = other.sigmaM.clone(); if(other.sigmaV != null) this.sigmaV = other.sigmaV.clone(); if(other.Sigma_xt != null) this.Sigma_xt = other.Sigma_xt.clone(); } /** * SCW uses a probabilistic version of the margin and attempts to make a * correction so that the confidence with correct label would be of a * certain threshold, which is set by eta. So the threshold must be in * [0.5, 1.0]. Values in the range [0.8, 0.9] often work well on a wide * range of problems * * @param eta the confidence to correct to */ public void setEta(double eta) { if(Double.isNaN(eta) || eta < 0.5 || eta > 1.0) throw new IllegalArgumentException("eta must be in [0.5, 1] not " + eta); this.eta = eta; this.phi = Normal.invcdf(eta, 0, 1); this.phiSqrd = phi*phi; this.zeta = 1 + phiSqrd; this.psi = 1 + phiSqrd/2; } /** * Returns the target correction confidence * @return the target correction confidence */ public double getEta() { return eta; } /** * Set the aggressiveness parameter. Increasing the value of this parameter * increases the aggressiveness of the algorithm. It must be a positive * value. This parameter essentially performs a type of regularization on * the updates * <br> * The aggressiveness parameter is only used by {@link Mode#SCWI} and * {@link Mode#SCWII} * * @param C the positive aggressiveness parameter */ public void setC(double C) { this.C = C; } /** * Returns the aggressiveness parameter * @return the aggressiveness parameter */ public double getC() { return C; } /** * Controls which version of the algorithm is used * @param mode which algorithm to use */ public void setMode(Mode mode) { this.mode = mode; } /** * Returns which algorithm is used * @return which algorithm is used */ public Mode getMode() { return mode; } /** * Using the full covariance matrix requires <i>O(d<sup>2</sup>)</i> work on * updates, where <i>d</i> is the dimension of the data. 
Runtime can be * reduced by using only the diagonal of the matrix to perform updates * in <i>O(s)</i> time, where <i>s &le; d</i> is the number of non-zero * values in the input * @param diagonalOnly {@code true} to use only the diagonal of the covariance */ public void setDiagonalOnly(boolean diagonalOnly) { this.diagonalOnly = diagonalOnly; } /** * Returns {@code true} if the covariance matrix is restricted to its diagonal entries * @return {@code true} if the covariance matrix is restricted to its diagonal entries */ public boolean isDiagonalOnly() { return diagonalOnly; } /** * Returns the weight vector used to compute results via a dot product. <br> * Do not modify this value, or you will alter the results returned. * @return the learned weight vector for prediction */ public Vec getWeightVec() { return w; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return 0; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public SCW clone() { return new SCW(this); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(numericAttributes <= 0) throw new FailedToFitException("SCW requires numeric attributes to perform classification"); else if(predicting.getNumOfCategories() != 2) throw new FailedToFitException("SCW is a binary classifier"); w = new DenseVector(numericAttributes); Sigma_xt = new DenseVector(numericAttributes); if(diagonalOnly) { sigmaV = new DenseVector(numericAttributes); sigmaV.mutableAdd(1); } else sigmaM = Matrix.eye(numericAttributes); } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { final Vec x_t = dataPoint.getNumericalValues(); final double y_t = targetClass*2-1; double score = x_t.dot(w); double v_t = 0; if (diagonalOnly) { //Faster to set only the needed final values for (IndexValue iv : x_t) { double x_t_i = iv.getValue(); v_t += x_t_i * x_t_i * sigmaV.get(iv.getIndex()); } } else { sigmaM.multiply(x_t, 1, Sigma_xt); v_t = x_t.dot(Sigma_xt); } //Check for numerical issues if(v_t <= 0)//semi positive definit, should not happen throw new FailedToFitException("Numerical issues occured"); double m_t = y_t*score; final double loss = max(0, phi*sqrt(v_t)-m_t); if(loss <= 1e-15) { if(!diagonalOnly) zeroOutSigmaXt(x_t); return; } final double alpha_t; if(mode == Mode.SCWI || mode == Mode.CW) { double tmp = max(0, (-m_t*psi+sqrt(m_t*m_t*phiSqrd*phiSqrd/4+v_t*phiSqrd*zeta))/(v_t*zeta) ); if(mode == Mode.SCWI) alpha_t = min(C, tmp); else alpha_t = tmp; } else//SCWII { final double n_t = v_t+1/(2*C); final double gamma = phi*sqrt(phiSqrd*v_t*v_t*m_t*m_t+4*n_t*v_t*(n_t+v_t*phiSqrd)); alpha_t = max(0, (-(2*m_t*n_t+phiSqrd*m_t*v_t)+gamma)/(2*(n_t*n_t+n_t*v_t*phiSqrd))); } if(alpha_t < 1e-7)//update is numerically unstable { if(!diagonalOnly) zeroOutSigmaXt(x_t); return; } final double u_t = pow(-alpha_t*v_t*phi+sqrt(alpha_t*alpha_t*v_t*v_t*phiSqrd+4*v_t), 2)/4; //Now update mean and variance if (diagonalOnly) { for (IndexValue iv : x_t) { double x_t_i = iv.getValue(); double tmp = x_t_i * sigmaV.get(iv.getIndex()); w.increment(iv.getIndex(), alpha_t * y_t * tmp); } } else w.mutableAdd(alpha_t * y_t, Sigma_xt); 
if(diagonalOnly)//diag does not need beta { //Only non zeros change the cov values final double coef = alpha_t*phi*pow(u_t, -0.5); for(IndexValue iv : x_t) { int idx = iv.getIndex(); double S_rr = sigmaV.get(idx); sigmaV.set(idx, 1/(1/S_rr+coef*pow(iv.getValue(), 2))); } } else { final double beta_t = alpha_t*phi/(sqrt(u_t)+v_t*alpha_t*phi); Matrix.OuterProductUpdate(sigmaM, Sigma_xt, Sigma_xt, -beta_t); zeroOutSigmaXt(x_t); } } @Override public CategoricalResults classify(DataPoint data) { if(w == null) throw new UntrainedModelException("Model has not yet been trained"); CategoricalResults cr = new CategoricalResults(2); double score = getScore(data); if(score < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { return w.dot(dp.getNumericalValues()); } @Override public boolean supportsWeightedData() { return false; } /** * Guess the distribution to use for the regularization term * {@link #setC(double) C} . * * @param d the data set to get the guess for * @return the guess for the C parameter */ public static Distribution guessC(DataSet d) { return new LogUniform(Math.pow(2, -4), Math.pow(2, 4));//from Exact Soft Confidence-Weighted Learning paper } /** * Guess the distribution to use for the margin confidence parameter * {@link #setEta(double) &eta; } . * * @param d the data set to get the guess for * @return the guess for the &eta; parameter */ public static Distribution guessEta(DataSet d) { return new Uniform(0.5, 0.95);//from Exact Soft Confidence-Weighted Learning paper } }
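/*
 * A minimal usage sketch for the SCW learner above, assuming the standard JSAT
 * ClassificationDataSet / DataPoint API and a train(ClassificationDataSet) method
 * inherited from JSAT's updateable-classifier base class. The class name, toy data,
 * and parameter choices below are illustrative assumptions, not part of the library.
 */
import jsat.classifiers.CategoricalData;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.linear.SCW;
import jsat.linear.DenseVector;

class SCWUsageSketch
{
    public static void main(String[] args)
    {
        //binary problem with 2 numeric features and no categorical features
        ClassificationDataSet data = new ClassificationDataSet(2, new CategoricalData[0], new CategoricalData(2));
        data.addDataPoint(DenseVector.toDenseVec( 1.0,  1.2), new int[0], 1);
        data.addDataPoint(DenseVector.toDenseVec( 0.9,  0.8), new int[0], 1);
        data.addDataPoint(DenseVector.toDenseVec(-1.1, -0.9), new int[0], 0);
        data.addDataPoint(DenseVector.toDenseVec(-0.8, -1.3), new int[0], 0);

        //eta = 0.9 confidence target, SCW-I update rule, diagonal covariance only
        SCW scw = new SCW(0.9, SCW.Mode.SCWI, true);
        scw.train(data);

        DataPoint query = new DataPoint(DenseVector.toDenseVec(1.0, 1.0), new int[0], new CategoricalData[0]);
        CategoricalResults cr = scw.classify(query);
        System.out.println("predicted class: " + cr.mostLikely());
    }
}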
13,748
27.884454
122
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/SDCA.java
/* * Copyright (C) 2017 Edward Raff <[email protected]> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package jsat.classifiers.linear; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Random; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.SimpleWeightVectorModel; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.WarmClassifier; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.distributions.Uniform; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.linear.Vec; import jsat.lossfunctions.*; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.regression.RegressionDataSet; import jsat.regression.Regressor; import jsat.regression.WarmRegressor; import jsat.utils.IntList; import jsat.utils.ListUtils; import jsat.utils.random.RandomUtil; /** * This class implements the Proximal Stochastic Dual Coordinate Ascent (SDCA) * algorithm for learning general linear models with Elastic-Net regularization. * It is a fast learning algorithm, and can be used for generic Logistic or * least-squares regression with elastic-net regularization.<br> * It can work with any {@link LossFunc} to determine if it solves a * classification or regression problem, so long as the * {@link LossFunc#getConjugate(double, double, double) conjugate} method of the * loss is implemented properly. This is especially useful for more exotic * cases, like using the robust {@link HuberLoss Huber loss} in a L1 regularized * scenario. <br> * NOTE: The current implementation dose not support any multi-class loss * function, though that isn't a limitation of the algorithm. * * @author Edward Raff <[email protected]> */ public class SDCA implements Classifier, Regressor, Parameterized, SimpleWeightVectorModel, WarmClassifier, WarmRegressor { private LossFunc loss; private boolean useBias = true; private double tol = 0.001; private double lambda; private double alpha = 0.5; private int max_epochs = 200; private double[] dual_alphas; /** * Returns the number of epochs SDCA took until reaching convergence. */ protected int epochs_taken; private Vec[] ws; private double[] bs; /** * Creates a new SDCA learner for {@link LogisticLoss logistic-regression}. * Pure L2 or L1 regularization can be obtained using the * {@link #setAlpha(double) alpha} parameter. */ public SDCA() { this(1e-5); } /** * <br>The implementation will use Elastic-Net regularization by default. * Pure L2 or L1 regularization can be obtained using the * {@link #setAlpha(double) alpha} parameter. * @param lambda the regularization penalty to use. 
*/ public SDCA(double lambda) { this(lambda, new LogisticLoss()); } /** * Creates a new SDCA learner for either a classification or regression * problem, the type of which is determined by the LossFunction given. * <br>The implementation will use Elastic-Net regularization by default. * Pure L2 or L1 regularization can be obtained using the * {@link #setAlpha(double) alpha} parameter. * * @param lambda the regularization penalty to use. * @param loss the loss function to use for training, which determines if it * implements classification or regression */ public SDCA(double lambda, LossFunc loss) { setLoss(loss); setLambda(lambda); } /** * Copy Constructor * @param toCopy the object to copy */ public SDCA(SDCA toCopy) { this.loss = toCopy.loss.clone(); this.useBias = toCopy.useBias; this.tol = toCopy.tol; this.lambda = toCopy.lambda; this.alpha = toCopy.alpha; this.max_epochs = toCopy.max_epochs; this.epochs_taken = toCopy.epochs_taken; if(toCopy.dual_alphas != null) this.dual_alphas = Arrays.copyOf(toCopy.dual_alphas, toCopy.dual_alphas.length); if(toCopy.ws != null) { this.ws = new Vec[toCopy.ws.length]; this.bs = new double[toCopy.bs.length]; for(int i = 0; i < toCopy.ws.length; i++) { this.ws[i] = toCopy.ws[i].clone(); this.bs[i] = toCopy.bs[i]; } } } /** * Sets whether or not an implicit bias term will be added to the data set * @param useBias {@code true} to add an implicit bias term */ public void setUseBias(boolean useBias) { this.useBias = useBias; } /** * Returns whether or not an implicit bias term is in use * @return {@code true} if a bias term is in use */ public boolean isUseBias() { return useBias; } /** * Sets the regularization term, where larger values indicate a larger * regularization penalty. * * @param lambda the positive regularization term */ @Parameter.WarmParameter(prefLowToHigh = false) public void setLambda(double lambda) { if(lambda <= 0 || Double.isInfinite(lambda) || Double.isNaN(lambda)) throw new IllegalArgumentException("Regularization term lambda must be a positive value, not " + lambda); this.lambda = lambda; } /** * * @return the regularization term */ public double getLambda() { return lambda; } /** * Using &alpha; = 1 corresponds to pure L<sub>1</sub> regularization, and * &alpha; = 0 corresponds to pure L<sub>2</sub> regularization. Any value * in-between is then an Elastic Net regularization. * * @param alpha the value in [0, 1] for determining the regularization * penalty's interpolation between pure L<sub>2</sub> and L<sub>1</sub> * regularization. */ public void setAlpha(double alpha) { if(alpha < 0 || alpha > 1 || Double.isNaN(alpha)) throw new IllegalArgumentException("alpha must be in [0, 1], not " + alpha); this.alpha = alpha; } /*** * * @return the fraction of weight (in [0, 1]) to apply to L<sub>1</sub> * regularization instead of L<sub>2</sub> regularization. */ public double getAlpha() { return alpha; } /** * Sets the maximum number of training iterations (epochs) for the algorithm. * * @param maxOuterIters the maximum number of outer iterations */ public void setMaxIters(int maxOuterIters) { if(maxOuterIters < 1) throw new IllegalArgumentException("Number of training iterations must be positive, not " + maxOuterIters); this.max_epochs = maxOuterIters; } /** * * @return the maximum number of training iterations */ public int getMaxIters() { return max_epochs; } /** * Sets the tolerance parameter for convergence. Smaller values will be more * exact, but larger values will converge faster. 
The default value is * fairly exact at {@value #DEFAULT_EPS}, increasing it by an order of * magnitude can often be done without hurting accuracy. * * @param e_out the tolerance parameter. */ public void setTolerance(double e_out) { if(e_out <= 0 || Double.isNaN(e_out)) throw new IllegalArgumentException("convergence tolerance paramter must be positive, not " + e_out); this.tol = e_out; } /** * * @return the convergence tolerance parameter */ public double getTolerance() { return tol; } /** * Sets the loss function used for the model. The loss function controls * whether or not regression, binary classification, or multi-class * classification is supported. <br> * <b>NOTE:</b> SDCA requires that the given loss function implement the * {@link LossFunc#getConjugate(double, double, double) conjugate} method, * otherwise it will not work with this algorithm. * * @param loss the loss function to use */ public void setLoss(LossFunc loss) { this.loss = loss; } /** * Returns the loss function in use * @return the loss function in use */ public LossFunc getLoss() { return loss; } @Override public CategoricalResults classify(DataPoint data) { Vec x = data.getNumericalValues(); if(ws.length == 1) return ((LossC)loss).getClassification(ws[0].dot(x)+bs[0]); else { Vec pred = new DenseVector(ws.length); for(int i = 0; i < ws.length; i++) pred.set(i, ws[i].dot(x)+bs[i]); ((LossMC)loss).process(pred, pred); return ((LossMC)loss).getClassification(pred); } } @Override public double regress(DataPoint data) { Vec x = data.getNumericalValues(); return ((LossR)loss).getRegression(ws[0].dot(x)+bs[0]); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { if(dataSet.getPredicting().getNumOfCategories() !=2) throw new RuntimeException("Current SDCA implementation only support binary classification problems"); double[] targets = new double[dataSet.size()]; for(int i = 0; i < targets.length; i++) targets[i] = dataSet.getDataPointCategory(i)*2-1; trainProxSDCA(dataSet, targets, null); } @Override public void train(ClassificationDataSet dataSet, Classifier warmSolution, boolean parallel) { train(dataSet, warmSolution); } @Override public void train(ClassificationDataSet dataSet, Classifier warmSolution) { if(warmSolution == null || !(warmSolution instanceof SDCA)) throw new FailedToFitException("SDCA implementation can only be warm-started from another instance of SDCA"); if(dataSet.getPredicting().getNumOfCategories() !=2) throw new RuntimeException("Current SDCA implementation only support binary classification problems"); double[] targets = new double[dataSet.size()]; for(int i = 0; i < targets.length; i++) targets[i] = dataSet.getDataPointCategory(i)*2-1; trainProxSDCA(dataSet, targets, ((SDCA)warmSolution).dual_alphas); } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { double[] targets = new double[dataSet.size()]; for(int i = 0; i < targets.length; i++) targets[i] = dataSet.getTargetValue(i); trainProxSDCA(dataSet, targets, null); } @Override public void train(RegressionDataSet dataSet, Regressor warmSolution, boolean parallel) { train(dataSet, warmSolution); } @Override public void train(RegressionDataSet dataSet, Regressor warmSolution) { double[] targets = new double[dataSet.size()]; for(int i = 0; i < targets.length; i++) targets[i] = dataSet.getTargetValue(i); trainProxSDCA(dataSet, targets, 
((SDCA)warmSolution).dual_alphas); } private void trainProxSDCA(DataSet dataSet, double[] targets, double[] warm_alphas) { final int N = dataSet.size(); final int D = dataSet.getNumNumericalVars(); ws = new Vec[]{new DenseVector(D)}; DenseVector v = new DenseVector(D); bs = new double[1]; final double[] x_norms = new double[N]; double scaling = 1; /* * SDCA seems scale sensative for classification, but insensative for * regression. In fact, re-scaling is breaking regression... so lets * just not scale when doing regression! Weird, should dig in more later. */ final boolean is_regression = dataSet instanceof RegressionDataSet; for(int i = 0; i < N; i++) { x_norms[i] = dataSet.getDataPoint(i).getNumericalValues().pNorm(2); //Scaling seems to muck up regresion... so dont! if(!is_regression) scaling = Math.max(scaling, x_norms[i]); } for(int i = 0; i < N; i++) x_norms[i] /= scaling; final double lambda_effective; final double sigma_p; final double tol_effective; if(alpha == 1)//Lasso case, but we MUST have some l2 reg to make this work { /* * See Section 5.5 Lasso, in "Accelerated proximal stochastic dual * coordinate ascent for regularized loss minimization" paper. * y_bar is given for the regression case. It appears y_bar's * definition is in fact, the average loss of the 0 vector. We can * compute this quickly. */ //TODO add support for weights in this //TODO we don't need to iterate over all points. loss will have the same output for each class, we can just iterate on the labels and average by class proportions double y_bar = 0; for(int i = 0; i < N; i++) y_bar += loss.getLoss(0.0, targets[i]); y_bar /= N; sigma_p = lambda; lambda_effective = tol * Math.pow(lambda / Math.max(y_bar, 1e-7), 2) ; tol_effective = tol/2; } else { lambda_effective = lambda; sigma_p = (alpha/(1-alpha)); tol_effective = tol; } //set up the weight vector used during training. If using elatic-net, we will do lazy-updates of the values //otherswise, we can just re-use v final double[] w_lazy_backing; final DenseVector w_lazy; if(alpha > 0) { //We need a lazily updated w to keep our work sparse! w_lazy_backing = new double[D]; w_lazy = new DenseVector(w_lazy_backing); } else//alpha = 0, we can just re-use v! { w_lazy_backing =null; w_lazy = v; } //init dual alphas, and any warm-start on the solutions if (warm_alphas == null) dual_alphas = new double[N]; else { if (N != warm_alphas.length) throw new FailedToFitException("SDCA only supports warm-start training from the same dataset. A dataset of side " + N + " was given for training, but the warm solution was trained on " + warm_alphas.length + " points."); this.dual_alphas = Arrays.copyOf(warm_alphas, warm_alphas.length); for(int i = 0; i < N; i++) { v.mutableAdd(dual_alphas[i], dataSet.getDataPoint(i).getNumericalValues()); if (useBias) bs[0] += dual_alphas[i]; } //noramlize v.mutableDivide(scaling * lambda_effective * N); bs[0] /= (scaling * lambda_effective * N); } Random rand = RandomUtil.getRandom(); double gamma = loss.lipschitz(); IntList epoch_order = new IntList(N); ListUtils.addRange(epoch_order, 0, N, 1); epochs_taken = 0; int primal_converg_check = 0; for(int epoch = 0; epoch < max_epochs; epoch++) { double prevPrimal = Double.POSITIVE_INFINITY; epochs_taken++; double dual_loss_est = 0; double primal_loss_est = 0; Collections.shuffle(epoch_order, rand); for(int i : epoch_order) { double alpha_i_prev = dual_alphas[i]; Vec x = dataSet.getDataPoint(i).getNumericalValues(); double y = targets[i]; if(alpha > 0)//lets lazily determine what w should look like! 
for(IndexValue iv : x) { final int j = iv.getIndex(); final double v_j = v.get(j); final double v_j_sign = Math.signum(v_j); final double v_j_abs = Math.abs(v_j); w_lazy_backing[j] = v_j_sign * Math.max(v_j_abs-sigma_p, 0.0); } //else, w_lazy points to v, which has the correct values final double raw_score = w_lazy.dot(x)/scaling+bs[0]; //Option II final double lossD = loss.getDeriv(raw_score, y); double u = -lossD; double q = u - alpha_i_prev;//also called z in older paper double q_sqrd = q*q; if(q_sqrd <= 1e-32) continue;//avoid a NaN from div by zero //Option III double phi_i = loss.getLoss(raw_score, y); double conjg = loss.getConjugate(-alpha_i_prev, raw_score, y); double x_norm = x_norms[i]; double x_norm_sqrd = x_norm*x_norm; double denom = q_sqrd*(gamma+x_norm_sqrd/(lambda_effective*N)); double s = (phi_i + conjg + raw_score*alpha_i_prev + gamma*q_sqrd/2)/denom; s = Math.min(1, s); //for convergence check at end of epoch, record point estiamte of average primal and dual losses primal_loss_est += phi_i; if(!Double.isInfinite(conjg)) dual_loss_est += -conjg; if(s == 0) continue; double alpha_i_delta = s*q; //α(t)_i ←α(t−1)_i +∆α_i dual_alphas[i] += alpha_i_delta; //v^(t) ←v^(t−1) +(λn)^-1 X_i ∆α_i v.mutableAdd(alpha_i_delta/(scaling*lambda_effective*N), x); if(useBias) bs[0] += alpha_i_delta/(scaling*lambda_effective*N); //w^(t) ←∇g∗(v^(t)) //we do this lazily only when we need it! } //gap is technically missing an estiamte of the regularization terms in the primal-dual gap //But this looks close enough? Plus I don't need to do book keeping since w dosn't exist fully... double gap = Math.abs(primal_loss_est-dual_loss_est)/N; // System.out.println("Epoch " + epoch + " has gap " + gap); if(gap < tol_effective) { // System.out.println("\tGap: " + gap + " Epoch: " + epoch); break; } //Sometime's gap dosn't work well when alphas hit weird ranges //lets check if the primal hasn't changed much in a while if(prevPrimal-primal_loss_est/N < tol_effective/5) { if(primal_converg_check++ > 10) break; } else primal_converg_check = 0; prevPrimal = primal_loss_est/N; } //apply full sparsity patternt to w for(int j = 0; j < D; j++) { final double v_j = v.get(j); final double v_j_sign = Math.signum(v_j); final double v_j_abs = Math.abs(v_j); ws[0].set(j, v_j_sign * Math.max(v_j_abs - sigma_p, 0.0)/scaling); } // System.out.println(ws[0].nnz() + " " + lambda + " " + sigma_p + " " + ws[0]); } @Override public boolean supportsWeightedData() { return false; } @Override public SDCA clone() { return new SDCA(this); } @Override public Vec getRawWeight(int index) { return ws[index]; } @Override public double getBias(int index) { return bs[index]; } @Override public int numWeightsVecs() { return ws.length; } @Override public boolean warmFromSameDataOnly() { return true; } /** * Guess the distribution to use for the regularization term * {@link #setLambda(double) lambda}. * * @param d the data set to get the guess for * @return the guess for the lambda parameter */ public static Distribution guessLambda(DataSet d) { int N = d.size(); return new LogUniform(1.0/(N*50), Math.min(1.0/(N/50), 1.0)); } /** * Guess the distribution to use for the regularization balance * {@link #setAlpha(double) alpha}. * * @param d the data set to get the guess for * @return the guess for the lambda parameter */ public static Distribution guessAlpha(DataSet d) { return new Uniform(0.0, 0.5); } }
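/*
 * A minimal usage sketch for SDCA, assuming the constructors and the
 * train/classify methods defined above. The lambda and alpha values, the class
 * name, and the toy data are illustrative assumptions; with LogisticLoss and
 * 0 < alpha < 1 this corresponds to elastic-net regularized logistic regression.
 */
import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.linear.SDCA;
import jsat.linear.DenseVector;
import jsat.lossfunctions.LogisticLoss;

class SDCAUsageSketch
{
    public static void main(String[] args)
    {
        //binary problem with 2 numeric features
        ClassificationDataSet data = new ClassificationDataSet(2, new CategoricalData[0], new CategoricalData(2));
        data.addDataPoint(DenseVector.toDenseVec( 2.0,  1.5), new int[0], 1);
        data.addDataPoint(DenseVector.toDenseVec( 1.5,  2.5), new int[0], 1);
        data.addDataPoint(DenseVector.toDenseVec(-2.0, -1.0), new int[0], 0);
        data.addDataPoint(DenseVector.toDenseVec(-1.0, -2.0), new int[0], 0);

        SDCA sdca = new SDCA(1e-3, new LogisticLoss());
        sdca.setAlpha(0.5); //an even mix of L1 and L2 regularization
        sdca.train(data);

        DataPoint query = new DataPoint(DenseVector.toDenseVec(1.8, 1.8), new int[0], new CategoricalData[0]);
        System.out.println("predicted class: " + sdca.classify(query).mostLikely());
    }
}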
21,983
32.978362
236
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/SMIDAS.java
package jsat.classifiers.linear; import static java.lang.Math.*; import java.util.Arrays; import java.util.Random; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.DataPoint; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.linear.Vec; import jsat.regression.RegressionDataSet; import jsat.utils.random.RandomUtil; /** * Implements the iterative and single threaded stochastic solver for * L<sub>1</sub> regularized linear regression problems SMIDAS (Stochastic * Mirror Descent Algorithm mAde Sparse). It performs very well when the number * of features is large relative to or greater than the number of data points. * It also works decently on smaller sparse data sets. <br> * Using the squared loss is equivalent to LASSO regression, and the LOG loss * is equivalent to logistic regression. <br> * <br> * Note: This implementation requires all feature values to be in the range * [-1, 1]. By default scaling is performed to [0,1] to preserve sparseness. If * your data is dense or has negative and positive feature values, scaling to * [-1, 1] may perform better. * See {@link #setReScale(boolean) }<br> * <br> * See:<br> * <a href="http://eprints.pascal-network.org/archive/00005418/">Shalev-Shwartz, * S.,&amp;Tewari, A. (2009). <i>Stochastic Methods for L<sub>1</sub>-regularized * Loss Minimization</i>. 26th International Conference on Machine Learning * (Vol. 12, pp. 929–936).</a> * * @author Edward Raff */ public class SMIDAS extends StochasticSTLinearL1 { private static final long serialVersionUID = -4888083541600164597L; private double eta; /** * Creates a new SMIDAS learner * @param eta the learning rate for each iteration */ public SMIDAS(double eta) { this(eta, DEFAULT_EPOCHS, DEFAULT_REG, DEFAULT_LOSS); } /** * Creates a new SMIDAS learner * @param eta the learning rate for each iteration * @param epochs the number of learning iterations * @param lambda the regularization penalty * @param loss the loss function to use */ public SMIDAS(double eta, int epochs, double lambda, Loss loss) { this(eta, epochs, lambda, loss, true); } /** * Creates a new SMIDAS learner * @param eta the learning rate for each iteration * @param epochs the number of learning iterations * @param lambda the regularization penalty * @param loss the loss function to use * @param reScale whether or not to rescale the feature values */ public SMIDAS(double eta, int epochs, double lambda, Loss loss, boolean reScale) { setEta(eta); setEpochs(epochs); setLambda(lambda); setLoss(loss); setReScale(reScale); } /** * Sets the learning rate used during training * @param eta the learning rate to use */ public void setEta(double eta) { if(Double.isNaN(eta) || Double.isInfinite(eta) || eta <= 0) throw new ArithmeticException("convergence parameter must be a positive value"); this.eta = eta; } /** * Returns the current learning rate used during training * @return the learning rate in use */ public double getEta() { return eta; } @Override public CategoricalResults classify(DataPoint data) { if(w == null) throw new UntrainedModelException("Model has not been trained"); Vec x = data.getNumericalValues(); return loss.classify(wDot(x)); } @Override public double regress(DataPoint data) { if(w == null) throw new UntrainedModelException("Model has not been trained"); Vec x = 
data.getNumericalValues(); return loss.regress(wDot(x)); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { if(dataSet.getNumNumericalVars() < 3) throw new FailedToFitException("SMIDAS requires at least 3 features"); else if(dataSet.getClassSize() != 2) throw new FailedToFitException("SMIDAS only supports binary classification problems"); Vec[] x = setUpVecs(dataSet); Vec obvMinV = DenseVector.toDenseVec(obvMin); Vec obvMaxV = DenseVector.toDenseVec(obvMax); Vec multitpliers = new DenseVector(obvMaxV.length()); multitpliers.mutableAdd(maxScaled-minScaled); multitpliers.mutablePairwiseDivide(obvMaxV.subtract(obvMinV)); boolean allZeroMins = true; for(double min : obvMin) if(min != 0) allZeroMins = false; double[] target = new double[x.length]; for(int i = 0; i < dataSet.size(); i++) { //Copy and scale each value if(allZeroMins && minScaled == 0.0) { x[i].mutablePairwiseMultiply(multitpliers); } else//destroy all sparsity and our dreams { x[i] = x[i].subtract(obvMinV); x[i].mutablePairwiseMultiply(multitpliers); x[i].mutableAdd(minScaled); } target[i] = dataSet.getDataPointCategory(i)*2-1; } train(x, target); } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { if(dataSet.getNumNumericalVars() < 3) throw new FailedToFitException("SMIDAS requires at least 3 features"); Vec[] x = setUpVecs(dataSet); Vec obvMinV = DenseVector.toDenseVec(obvMin); Vec obvMaxV = DenseVector.toDenseVec(obvMax); Vec multitpliers = new DenseVector(obvMaxV.length()); multitpliers.mutableAdd(maxScaled-minScaled); multitpliers.mutablePairwiseDivide(obvMaxV.subtract(obvMinV)); boolean allZeroMins = true; for(double min : obvMin) if(min != 0) allZeroMins = false; double[] target = new double[x.length]; for(int i = 0; i < dataSet.size(); i++) { if(allZeroMins && minScaled == 0.0) { x[i].mutablePairwiseMultiply(multitpliers); } else { //Copy and scale each value x[i] = x[i].subtract(obvMinV); x[i].mutablePairwiseMultiply(multitpliers); x[i].mutableAdd(minScaled); } target[i] = dataSet.getTargetValue(i); } train(x, target); } private void train(Vec[] x, double[] y) { final int m = x.length; final int d = x[0].length(); final double p = 2*Math.log(d); Vec theta = new DenseVector(d); double theta_bias = 0; double lossScore = 0; w = new DenseVector(d); Random rand = RandomUtil.getRandom(); for(int t = 0; t < epochs; t++) { int i = rand.nextInt(m); lossScore = loss.deriv(w.dot(x[i])+bias, y[i]); theta.mutableSubtract(eta*lossScore, x[i]); theta_bias -= eta*lossScore; for(IndexValue iv : theta) { int j = iv.getIndex(); double theta_j = iv.getValue();//theta.get(j); theta.set(j, signum(theta_j)*max(0, abs(theta_j)-eta*lambda)); } theta_bias = signum(theta_bias)*max(0, abs(theta_bias)-eta*lambda); final double thetaNorm = theta.pNorm(p); if(thetaNorm > 0) { //w = f^-1(theta) final double logThetaNorm = log(thetaNorm); for(int j = 0; j < w.length(); j++) { double theta_j = theta.get(j); w.set(j, signum(theta_j) * exp((p-1) * log(abs(theta_j)) - (p-2) * logThetaNorm)); } bias = signum(theta_bias)*exp((p-1) * log(abs(theta_bias)) - (p-2) * logThetaNorm); } else { theta.zeroOut(); theta_bias = 0; w.zeroOut(); bias = 0; } } } @Override public boolean supportsWeightedData() { return true; } @Override public SMIDAS clone() { SMIDAS clone = new SMIDAS(eta, epochs, lambda, loss, reScale); if(this.w != null) clone.w = this.w.clone(); 
clone.bias = this.bias; clone.minScaled = this.minScaled; clone.maxScaled = this.maxScaled; if(this.obvMin != null) clone.obvMin = Arrays.copyOf(this.obvMin, this.obvMin.length); if(this.obvMax != null) clone.obvMax = Arrays.copyOf(this.obvMax, this.obvMax.length); return clone; } private Vec[] setUpVecs(DataSet dataSet) { obvMin = new double[dataSet.getNumNumericalVars()]; Arrays.fill(obvMin, Double.POSITIVE_INFINITY); obvMax = new double[dataSet.getNumNumericalVars()]; Arrays.fill(obvMax, Double.NEGATIVE_INFINITY); Vec[] x = new Vec[dataSet.size()]; for(int i = 0; i < dataSet.size(); i++) { x[i] = dataSet.getDataPoint(i).getNumericalValues(); for(IndexValue iv : x[i]) { int j = iv.getIndex(); double v = iv.getValue(); obvMin[j] = Math.min(obvMin[j], v); obvMax[j] = Math.max(obvMax[j], v); } } if(x[0].isSparse())//Assume implicit min zeros from sparsity for(int i = 0; i < obvMin.length; i++) obvMin[i] = Math.min(obvMin[i], 0); if(!reScale) { for(double min : obvMin) if(min < -1) throw new FailedToFitException("Values must be in the range [-1,1], " + min + " violation encountered"); for(double max : obvMax) if(max > 1) throw new FailedToFitException("Values must be in the range [-1,1], " + max + " violation encountered"); } return x; } }
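/*
 * A minimal usage sketch for SMIDAS, assuming the constructors above and the
 * standard JSAT ClassificationDataSet API. SMIDAS requires at least 3 numeric
 * features and a binary target; the learning rate, toy data, and class name
 * here are illustrative assumptions. With the default squared loss this
 * behaves like a stochastic LASSO-style solver.
 */
import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.linear.SMIDAS;
import jsat.linear.DenseVector;

class SMIDASUsageSketch
{
    public static void main(String[] args)
    {
        //3 numeric features; every feature takes more than one value so the
        //internal [0,1] rescaling is well defined
        ClassificationDataSet data = new ClassificationDataSet(3, new CategoricalData[0], new CategoricalData(2));
        data.addDataPoint(DenseVector.toDenseVec(0.9, 0.1, 0.8), new int[0], 1);
        data.addDataPoint(DenseVector.toDenseVec(0.8, 0.2, 0.9), new int[0], 1);
        data.addDataPoint(DenseVector.toDenseVec(0.1, 0.9, 0.2), new int[0], 0);
        data.addDataPoint(DenseVector.toDenseVec(0.2, 0.8, 0.1), new int[0], 0);

        SMIDAS smidas = new SMIDAS(0.1); //learning rate eta, default epochs/regularization/loss otherwise
        smidas.train(data);

        DataPoint query = new DataPoint(DenseVector.toDenseVec(0.85, 0.15, 0.85), new int[0], new CategoricalData[0]);
        System.out.println("predicted class: " + smidas.classify(query).mostLikely());
    }
}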
10,704
32.142415
124
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/SPA.java
package jsat.classifiers.linear; import static java.lang.Math.*; import java.util.*; import jsat.DataSet; import jsat.SimpleWeightVectorModel; import jsat.classifiers.*; import jsat.distributions.Distribution; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.utils.IndexTable; /** * Support class Passive Aggressive (SPA) is a multi class generalization of * {@link PassiveAggressive}. It works in the same philosophy, and can obtain * better multi class accuracy then PA used with a meta learner. <br> * SPA is more sensitive to small values for the {@link #setC(double) * aggressiveness parameter}. <br> * If working with a binary classification problem, SPA reduces to PA, and the * original PA implementation should be used instead. <br> * By default, the {@link #setUseBias(boolean) biast term} is not used. * <br><br> * See: <br> * Matsushima, S., Shimizu, N., Yoshida, K., Ninomiya, T.,&amp;Nakagawa, H. * (2010). <i>Exact Passive-Aggressive Algorithm for Multiclass Classification * Using Support Class</i>. SIAM International Conference on Data Mining - SDM * (pp. 303–314). Retrieved from * <a href="https://www.siam.org/proceedings/datamining/2010/dm10_027_matsushimas.pdf">here</a> * * @author Edward Raff */ public class SPA extends BaseUpdateableClassifier implements Parameterized, SimpleWeightVectorModel { private static final long serialVersionUID = 3613279663279244169L; private Vec[] w; private double[] bias; private double C = 1; private boolean useBias = false; private PassiveAggressive.Mode mode; /** * Creates a new Passive Aggressive learner that does 10 epochs and uses * PA2. */ public SPA() { this(10, PassiveAggressive.Mode.PA2); } /** * Creates a new Passive Aggressive learner * * @param epochs the number of training epochs to use during batch training * @param mode which version of the update to perform */ public SPA(int epochs, PassiveAggressive.Mode mode) { setEpochs(epochs); setMode(mode); } /** * Sets whether or not the implementation will use an implicit bias term * appended to the inputs or not. * @param useBias {@code true} to add an implicit bias term, {@code false} * to use the data as given */ public void setUseBias(boolean useBias) { this.useBias = useBias; } /** * Returns true if an implicit bias term will be added, false otherwise * @return true if an implicit bias term will be added, false otherwise */ public boolean isUseBias() { return useBias; } /** * Set the aggressiveness parameter. Increasing the value of this parameter * increases the aggressiveness of the algorithm. It must be a positive * value. This parameter essentially performs a type of regularization on * the updates * <br> * An infinitely large value is equivalent to being completely aggressive, * and is performed when the mode is set to {@link PassiveAggressive.Mode#PA}. * * @param C the positive aggressiveness parameter */ public void setC(double C) { if(Double.isNaN(C) || Double.isInfinite(C) || C <= 0) throw new ArithmeticException("Aggressiveness must be a positive constant"); this.C = C; } /** * Returns the aggressiveness parameter * @return the aggressiveness parameter */ public double getC() { return C; } /** * Sets which version of the PA update is used. 
* @param mode which PA update style to perform */ public void setMode(PassiveAggressive.Mode mode) { this.mode = mode; } /** * Returns which version of the PA update is used * @return which PA update style is used */ public PassiveAggressive.Mode getMode() { return mode; } @Override public Vec getRawWeight(int index) { return w[index]; } @Override public double getBias(int index) { return bias[index]; } @Override public int numWeightsVecs() { return w.length; } @Override public SPA clone() { SPA clone = new SPA(); if(this.w != null) { clone.w = new Vec[this.w.length]; for(int i = 0; i < w.length; i++) clone.w[i] = this.w[i].clone(); } if(this.it != null) clone.it = new IndexTable(this.it.length()); if(this.loss != null) clone.loss = Arrays.copyOf(this.loss, this.loss.length); clone.C = this.C; clone.mode = this.mode; if(this.bias != null) clone.bias = Arrays.copyOf(this.bias, this.bias.length); clone.useBias = this.useBias; return clone; } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { w = new Vec[predicting.getNumOfCategories()]; for(int i = 0; i < w.length; i++) w[i] = new DenseVector(numericAttributes); bias = new double[w.length]; loss = new double[w.length]; it = new IndexTable(w.length); } private double[] loss; private IndexTable it; /** * Part A of SPA algorithm * @param xNorm the value of the squared 2 norm training input * @param k the value of k * @param loss_k the loss of the k'th sorted value * @return the target support class goal to be less than */ private double getSupportClassGoal(final double xNorm, final int k, final double loss_k) { if(mode == PassiveAggressive.Mode.PA1) return min((k-1)*loss_k+C*xNorm, k*loss_k); else if(mode == PassiveAggressive.Mode.PA2) return ((k*xNorm+(k-1)/(2*C))/(xNorm+1.0/(2*C)))*loss_k; else return k*loss_k; } /** * Part B of SPA algorithm * @param loss_cur the loss for the current value in consideration * @param xNorm the value of the squared 2 norm training input * @param k the value of k (number of support classes +1) * @param supLossSum the sum of the loss for the support classes * @return the update step size */ private double getStepSize(final double loss_cur, final double xNorm, int k, final double supLossSum) { if(mode == PassiveAggressive.Mode.PA1) return max(0, loss_cur-max(supLossSum/(k-1)-C/(k-1)*xNorm, supLossSum/k))/xNorm; else if(mode == PassiveAggressive.Mode.PA2) return max(0, loss_cur-(xNorm+1/(2*C))/(k*xNorm+(k-1)/(2*C))*supLossSum )/xNorm; else return max(0, loss_cur-supLossSum/k)/xNorm; } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { Vec x = dataPoint.getNumericalValues(); final double w_y_dot_x = w[targetClass].dot(x) + bias[targetClass]; for (int v = 0; v < w.length; v++) if (v != targetClass) loss[v] = max(0, 1 - (w_y_dot_x - w[v].dot(x) - bias[v])); else loss[v] = Double.POSITIVE_INFINITY;//set in Inft so its ends up in index 0, and gets skipped final double xNorm = pow(x.pNorm(2) + (useBias ? 
1 : 0), 2); it.sortR(loss); int k = 1; double T31 = 0;//Theorem 3.1 while (k < loss.length && T31 < getSupportClassGoal(xNorm, k, loss[it.index(k)])) T31 += loss[it.index(k++)]; double supportLossSum = 0; for (int j = 1; j < k; j++) supportLossSum += loss[it.index(j)]; for (int j = 1; j < k; j++) { final int v = it.index(j); double tau = getStepSize(loss[v], xNorm, k, supportLossSum); w[targetClass].mutableAdd(tau, x); w[v].mutableSubtract(tau, x); if (useBias) { bias[targetClass] += tau; bias[v] -= tau; } } } @Override public CategoricalResults classify(DataPoint data) { Vec x = data.getNumericalValues(); CategoricalResults cr = new CategoricalResults(w.length); int maxIdx = 0; double maxVAl = w[0].dot(x)+bias[0]; for(int i = 1; i < w.length; i++) { double val = w[i].dot(x)+bias[i]; if(val > maxVAl) { maxVAl = val; maxIdx = i; } } cr.setProb(maxIdx, 1.0); return cr; } @Override public boolean supportsWeightedData() { return false; } /** * Guess the distribution to use for the regularization term * {@link #setC(double) C} in Support PassiveAggressive. * * @param d the data set to get the guess for * @return the guess for the C parameter */ public static Distribution guessC(DataSet d) { return PassiveAggressive.guessC(d); } }
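/*
 * A minimal usage sketch for SPA, assuming the constructor shown above, the
 * PassiveAggressive.Mode enum from the same package, and the
 * train(ClassificationDataSet) method inherited from BaseUpdateableClassifier.
 * The class name, toy 3-class data, and parameter choices are illustrative assumptions.
 */
import jsat.classifiers.CategoricalData;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.linear.PassiveAggressive;
import jsat.classifiers.linear.SPA;
import jsat.linear.DenseVector;

class SPAUsageSketch
{
    public static void main(String[] args)
    {
        //3-class problem with 2 numeric features
        ClassificationDataSet data = new ClassificationDataSet(2, new CategoricalData[0], new CategoricalData(3));
        data.addDataPoint(DenseVector.toDenseVec( 1.0,  0.1), new int[0], 0);
        data.addDataPoint(DenseVector.toDenseVec( 0.9,  0.2), new int[0], 0);
        data.addDataPoint(DenseVector.toDenseVec( 0.1,  1.0), new int[0], 1);
        data.addDataPoint(DenseVector.toDenseVec( 0.2,  0.9), new int[0], 1);
        data.addDataPoint(DenseVector.toDenseVec(-1.0, -1.0), new int[0], 2);
        data.addDataPoint(DenseVector.toDenseVec(-0.9, -0.8), new int[0], 2);

        SPA spa = new SPA(10, PassiveAggressive.Mode.PA2); //10 epochs, PA-II style updates
        spa.train(data);

        DataPoint query = new DataPoint(DenseVector.toDenseVec(0.95, 0.15), new int[0], new CategoricalData[0]);
        System.out.println("predicted class: " + spa.classify(query).mostLikely());
    }
}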
9,206
30.10473
113
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/STGD.java
package jsat.classifiers.linear; import java.util.*; import java.util.concurrent.ExecutorService; import jsat.SingleWeightVectorModel; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.regression.BaseUpdateableRegressor; import jsat.regression.RegressionDataSet; import jsat.regression.UpdateableRegressor; /** * This provides an implementation of Sparse Truncated Gradient Descent for * L<sub>1</sub> regularized linear classification and regression on sparse data * sets. * <br><br> * Unlike normal L<sub>1</sub> regression, regularization is controlled by the * {@link #setGravity(double) gravity} parameter, but other parameters * contribute to the level of sparsity. * <br><br> * See: Langford, J., Li, L.,&amp;Zhang, T. (2009). <i>Sparse online learning via * truncated gradient</i>. The Journal of Machine Learning Research, 10, * 777–801. Retrieved from <a href="http://dl.acm.org/citation.cfm?id=1577097"> * here</a> * @author Edward Raff */ public class STGD extends BaseUpdateableClassifier implements UpdateableRegressor, BinaryScoreClassifier, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = 5753298014967370769L; private Vec w; private int K; private double learningRate; private double threshold; private double gravity; private int time; private int[] t; /** * Creates a new STGD learner * @param K the regularization frequency * @param learningRate the learning rate to use * @param threshold the regularization threshold * @param gravity the regularization parameter */ public STGD(int K, double learningRate, double threshold, double gravity) { setK(K); setLearningRate(learningRate); setThreshold(threshold); setGravity(gravity); } /** * Copy constructor * @param toCopy the object to copy */ protected STGD(STGD toCopy) { if(toCopy.w != null) this.w = toCopy.w.clone(); this.K = toCopy.K; this.learningRate = toCopy.learningRate; this.threshold = toCopy.threshold; this.gravity = toCopy.gravity; this.time = toCopy.time; if(toCopy.t != null) this.t = Arrays.copyOf(toCopy.t, toCopy.t.length); } /** * Sets the frequency of applying the {@link #setGravity(double) gravity} * parameter to the weight vector. This value must be positive, and the * gravity will be applied every <i>K</i> updates. Increasing this value * encourages greater sparsity. * * @param K the frequency to apply regularization in [1, Infinity ) */ public void setK(int K) { if(K < 1) throw new IllegalArgumentException("K must be positive, not " + K); this.K = K; } /** * Returns the frequency of regularization application * @return the frequency of regularization application */ public int getK() { return K; } /** * Sets the learning rate to use * @param learningRate the learning rate &gt; 0. */ public void setLearningRate(double learningRate) { if(Double.isInfinite(learningRate) || Double.isNaN(learningRate) || learningRate <= 0) throw new IllegalArgumentException("Learning rate must be positive, not " + learningRate); this.learningRate = learningRate; } /** * Returns the learning rate to use * @return the learning rate to use */ public double getLearningRate() { return learningRate; } /** * Sets the threshold for a coefficient value to avoid regularization. While * a coefficient reaches this magnitude, regularization will not be applied. 
* @param threshold the coefficient regularization threshold in * ( 0, Infinity ] */ public void setThreshold(double threshold) { if(Double.isNaN(threshold) || threshold <= 0) throw new IllegalArgumentException("Threshold must be positive, not " + threshold); this.threshold = threshold; } /** * Returns the coefficient threshold parameter * @return the coefficient threshold parameter */ public double getThreshold() { return threshold; } /** * Sets the gravity regularization parameter that "weighs down" the * coefficient values. Larger gravity values impose stronger regularization, * and encourage greater sparsity. * * @param gravity the regularization parameter in ( 0, Infinity ) */ public void setGravity(double gravity) { if(Double.isInfinite(gravity) || Double.isNaN(gravity) || gravity <= 0) throw new IllegalArgumentException("Gravity must be positive, not " + gravity); this.gravity = gravity; } /** * Returns the regularization parameter * @return the regularization parameter */ public double getGravity() { return gravity; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return 0; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public STGD clone() { return new STGD(this); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(predicting.getNumOfCategories() != 2) throw new FailedToFitException("STGD supports only binary classification"); setUp(categoricalAttributes, numericAttributes); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes) { if(numericAttributes < 1) throw new FailedToFitException("STGD requires numeric features"); w = new DenseVector(numericAttributes); t = new int[numericAttributes]; } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { BaseUpdateableRegressor.trainEpochs(dataSet, this, getEpochs()); } private static double T(double v_j, double a, double theta) { if(v_j >= 0 && v_j <= theta) return Math.max(0, v_j-a); else if(v_j <= 0 && v_j >= -theta) return Math.min(0, v_j+a); else return v_j; } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { time++; final Vec x = dataPoint.getNumericalValues(); final int y = targetClass*2-1; final int yHat = (int) Math.signum(w.dot(x)); if(yHat == y)//Not part of the described algorithm (using signum), but needed return; performUpdate(x, y, yHat); } @Override public void update(DataPoint dataPoint, double weight, final double y) { time++; final Vec x = dataPoint.getNumericalValues(); final double yHat = w.dot(x); performUpdate(x, y, yHat); } /** * Performs the sparse update of the weight vector * @param x the input vector * @param y the true value * @param yHat the predicted value */ private void performUpdate(final Vec x, final double y, final double yHat) { for(IndexValue iv : x) { final int j = iv.getIndex(); w.set(j, T(w.get(j)+2*learningRate*(y-yHat)*iv.getValue(), ((time-t[j])/K)*gravity*learningRate, threshold)); t[j] += ((time-t[j])/K)*K; } } @Override public CategoricalResults classify(DataPoint data) { 
CategoricalResults cr = new CategoricalResults(2); if(getScore(data) > 0) cr.setProb(1, 1.0); else cr.setProb(0, 1.0); return cr; } @Override public double regress(DataPoint data) { return getScore(data); } @Override public boolean supportsWeightedData() { return false; } @Override public double getScore(DataPoint dp) { return w.dot(dp.getNumericalValues()); } }
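/*
 * A minimal usage sketch for STGD used as a regressor, assuming the constructor
 * above and the standard JSAT RegressionDataSet / DataPoint API (including an
 * addDataPoint(Vec, int[], double) overload). The class name, toy data, and the
 * hyper-parameter values (K, learning rate, threshold, gravity) are illustrative assumptions.
 */
import jsat.classifiers.CategoricalData;
import jsat.classifiers.DataPoint;
import jsat.classifiers.linear.STGD;
import jsat.linear.DenseVector;
import jsat.regression.RegressionDataSet;

class STGDUsageSketch
{
    public static void main(String[] args)
    {
        //toy regression target, roughly y = x1 + 2*x2
        RegressionDataSet data = new RegressionDataSet(2, new CategoricalData[0]);
        data.addDataPoint(DenseVector.toDenseVec(1.0, 1.0), new int[0], 3.0);
        data.addDataPoint(DenseVector.toDenseVec(2.0, 0.5), new int[0], 3.0);
        data.addDataPoint(DenseVector.toDenseVec(0.5, 2.0), new int[0], 4.5);
        data.addDataPoint(DenseVector.toDenseVec(1.5, 1.5), new int[0], 4.5);

        //K = 10 regularization frequency, learning rate 0.05, threshold 5.0, gravity 0.01
        STGD stgd = new STGD(10, 0.05, 5.0, 0.01);
        stgd.setEpochs(20); //inherited epoch count used by the batch train methods
        stgd.train(data);

        DataPoint query = new DataPoint(DenseVector.toDenseVec(1.0, 2.0), new int[0], new CategoricalData[0]);
        System.out.println("predicted value: " + stgd.regress(query));
    }
}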
9,087
27.311526
144
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/StochasticMultinomialLogisticRegression.java
package jsat.classifiers.linear; import static java.lang.Math.*; import java.util.*; import java.util.concurrent.ExecutorService; import jsat.SimpleWeightVectorModel; import jsat.classifiers.*; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.ConstantVector; import jsat.linear.DenseVector; import jsat.linear.IndexValue; import jsat.linear.Vec; import jsat.math.MathTricks; import jsat.math.decayrates.DecayRate; import jsat.math.decayrates.ExponetialDecay; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.utils.IntList; import jsat.utils.ListUtils; /** * This is a Stochastic implementation of Multinomial Logistic Regression. It * supports regularization from several different priors, and performs prior * updates in a lazy fashion to avoid destroying the sparsity of training * inputs. * <br> * Algorithm is based on the technical report:<br> * Carpenter, B. (2008). <i>Lazy Sparse Stochastic Gradient Descent for * Regularized Mutlinomial Logistic Regression</i>. Retrieved from * http://lingpipe-blog.com/lingpipe-white-papers/ * * @author Edward Raff */ public class StochasticMultinomialLogisticRegression implements Classifier, Parameterized, SimpleWeightVectorModel { private static final long serialVersionUID = -492707881682847556L; private int epochs; private boolean clipping = true; private double regularization; private double tolerance = 1e-4; private double initialLearningRate; private double alpha = 0.5; private DecayRate learningRateDecay = new ExponetialDecay(); private Prior prior; private boolean standardized = true; private boolean useBias = true; private int miniBatchSize = 1; private Vec[] B; private double[] biases; /** * Creates a new Stochastic Multinomial Logistic Regression object * @param initialLearningRate the initial learning rate to use * @param epochs the maximum number of training epochs to go through * @param regularization the scale factor applied to the regularization term * @param prior the prior to use for regularization */ public StochasticMultinomialLogisticRegression(double initialLearningRate, int epochs, double regularization, Prior prior) { setEpochs(epochs); setRegularization(regularization); setInitialLearningRate(initialLearningRate); setPrior(prior); } /** * Creates a new Stochastic Multinomial Logistic Regression that uses a * {@link Prior#GAUSSIAN} prior with a regularization scale of 1e-6. * * @param initialLearningRate the initial learning rate to use * @param epochs the maximum number of training epochs to go through */ public StochasticMultinomialLogisticRegression(double initialLearningRate, int epochs) { this(initialLearningRate, epochs, 1e-6, Prior.GAUSSIAN); } /** * Creates a new Stochastic Multinomial Logistic Regression that uses a * {@link Prior#GAUSSIAN} prior with a regularization scale of 1e-6. 
It * will do at most 50 epochs with a learning rate of 0.1 */ public StochasticMultinomialLogisticRegression() { this(0.1, 50); } /** * Copy constructor * @param toClone the classifier to create a copy of */ protected StochasticMultinomialLogisticRegression(StochasticMultinomialLogisticRegression toClone) { this.epochs = toClone.epochs; this.clipping = toClone.clipping; this.regularization = toClone.regularization; this.tolerance = toClone.tolerance; this.initialLearningRate = toClone.initialLearningRate; this.alpha = toClone.alpha; this.learningRateDecay = toClone.learningRateDecay; this.prior = toClone.prior; this.standardized = toClone.standardized; if(toClone.B != null) { this.B = new Vec[toClone.B.length]; for(int i = 0; i < toClone.B.length; i++) this.B[i] = toClone.B[i].clone(); } if(toClone.biases != null) this.biases = Arrays.copyOf(toClone.biases, toClone.biases.length); } /** * Represents a prior of the coefficients that can be applied to perform * regularization. */ public enum Prior { /** * A Gaussian prior, this is equivalent to L<sub>2</sub> regularization. */ GAUSSIAN { @Override protected double gradientError(double b_i, double s_i) { return - b_i/s_i; } @Override protected double logProb(double b_i, double s_i) { return -0.5*log(2*PI*s_i)-2*b_i*b_i*s_i/2; } }, /** * A Laplace prior, this is equivalent to L<sub>1</sub> regularization */ LAPLACE { @Override protected double gradientError(double b_i, double s_i) { return - sqrt(2)*signum(b_i)/sqrt(s_i); } @Override protected double logProb(double b_i, double s_i) { return -signum(b_i)*sqrt(2)*b_i/sqrt(s_i)-0.5*log(2*s_i); } }, /** * This is the Elastic Net prior, and it uses the extra * {@link #setAlpha(double) alpha} parameter. This prior is a mix of * both {@link #LAPLACE} and {@link #GAUSSIAN}. Alpha should be in the * range [0,1]. Alpha weight will be applied to the Laplace prior, and * (1-alpha) weight will be applied to the Gaussian prior. The extreme * values of this collapse into the Laplace and Gaussian priors. */ ELASTIC { @Override protected double gradientError(double b_i, double s_i) { throw new UnsupportedOperationException(); } @Override protected double gradientError(double b_i, double s_i, double alpha) { return alpha*LAPLACE.gradientError(b_i, s_i) + (1-alpha)*GAUSSIAN.gradientError(b_i, s_i); } @Override protected double logProb(double b_i, double s_i) { return Double.NaN; } @Override protected double logProb(double b_i, double s_i, double alpha) { return alpha*LAPLACE.logProb(b_i, s_i) + (1-alpha)*GAUSSIAN.logProb(b_i, s_i); } }, /** * This is a prior from the Cauchy (student-t) distribution, and it uses * the extra {@link #setAlpha(double) alpha} parameter. Alpha should be * in the range (0, Infty). */ CAUCHY { @Override protected double gradientError(double b_i, double s_i) { throw new UnsupportedOperationException(); } @Override protected double gradientError(double b_i, double s_i, double alpha) { return - 2*b_i/(b_i*b_i+alpha*alpha); } @Override protected double logProb(double b_i, double s_i) { return Double.NaN; } @Override protected double logProb(double b_i, double s_i, double alpha) { return -log(PI)+log(alpha)-log(b_i*b_i+alpha*alpha); } }, /** * This is the Uniform prior. The uniform prior is equivalent to * no regularization. 
*/ UNIFORM { @Override protected double gradientError(double b_i, double s_i) { return 0; } @Override protected double logProb(double b_i, double s_i) { return 0; } }; abstract protected double gradientError(double b_i, double s_i); protected double gradientError(double b_i, double s_i, double alpha) { return gradientError(b_i, s_i); } abstract protected double logProb(double b_i, double s_i); protected double logProb(double b_i, double s_i, double alpha) { return logProb(b_i, s_i); } } /** * Sets whether or not to learn the bias term for a model. If no bias term * is in use, the model learned must pass through the origin of the world. * The use of the bias term is very important for low dimensional problems, * but less so for many higher dimensional problems. * @param useBias {@code true} if the bias term should be used, * {@code false} otherwise */ public void setUseBias(boolean useBias) { this.useBias = useBias; } /** * Returns {@code true} if the bias term is in use * @return {@code true} if the bias term is in use */ public boolean isUseBias() { return useBias; } /** * Sets the maximum number of epochs that occur in each iteration. Each * epoch goes through the whole data set once. * * @param epochs the maximum number of epochs to train */ public void setEpochs(int epochs) { if(epochs <= 0) throw new IllegalArgumentException("Number of epochs must be positive"); this.epochs = epochs; } /** * Returns the maximum number of epochs * @return the maximum number of epochs */ public int getEpochs() { return epochs; } /** * Sets the extra parameter alpha. This is used for some priors that take * an extra parameter. This is {@link Prior#CAUCHY} and * {@link Prior#ELASTIC}. If these two priors are not in use, the value is * ignored. * * @param alpha the extra parameter value to use. Must be positive */ public void setAlpha(double alpha) { if(alpha < 0 || Double.isNaN(alpha) || Double.isInfinite(alpha)) throw new IllegalArgumentException("Extra parameter must be non negative, not " + alpha); this.alpha = alpha; } /** * Returns the extra parameter value * @return the extra parameter value */ public double getAlpha() { return alpha; } /** * Sets whether or not the clip changes in coefficient values caused by * regularization so that they can not make the coefficients go from * positive to negative or negative to positive. If clipping is on, the * value will go to zero instead. If off, the value will be allowed to * change signs. <br> * If there is no regularization, this has no impact. * * @param clipping {@code true} if clipping should be used, false otherwise */ public void setClipping(boolean clipping) { this.clipping = clipping; } /** * Returns whether or not coefficient clipping is on. * @return {@code true} if clipping is on. */ public boolean isClipping() { return clipping; } /** * Sets the initial learning rate to use for the first epoch. The learning * rate will decay according to the * {@link #setLearningRateDecay(jsat.math.decayrates.DecayRate) decay rate} * in use. 
* * @param initialLearningRate the initial learning rate to use */ public void setInitialLearningRate(double initialLearningRate) { if(initialLearningRate <= 0 || Double.isInfinite(initialLearningRate) || Double.isNaN(initialLearningRate)) throw new IllegalArgumentException("Learning rate must be a positive constant, not " + initialLearningRate); this.initialLearningRate = initialLearningRate; } /** * Returns the current initial learning rate * @return the learning rate in use */ public double getInitialLearningRate() { return initialLearningRate; } /** * Sets the decay rate used to reduce the learning rate after each epoch. * * @param learningRateDecay the decay rate to use */ public void setLearningRateDecay(DecayRate learningRateDecay) { this.learningRateDecay = learningRateDecay; } /** * Returns the decay rate in use * @return the decay rate in use */ public DecayRate getLearningRateDecay() { return learningRateDecay; } /** * Sets the coefficient applied to the regularization penalty at each * update. This is usual set to a small value less than 1. If set to zero, * it effectively turns off the use of regularization. * * @param regularization the non negative regularization coefficient to apply */ public void setRegularization(double regularization) { if(regularization < 0 || Double.isNaN(regularization) || Double.isInfinite(regularization)) throw new IllegalArgumentException("Regualrization must be a non negative constant, not " + regularization); this.regularization = regularization; } /** * Returns the regularization coefficient in use * @return the regularization coefficient in use */ public double getRegularization() { return regularization; } /** * Sets the prior used to perform regularization * @param prior the prior to use */ public void setPrior(Prior prior) { this.prior = prior; } /** * Returns the prior used for regularization * @return the prior used */ public Prior getPrior() { return prior; } /** * Sets the tolerance that determines when the training stops early because * the change has become too insignificant. * * @param tolerance the minimum change in log likelihood to stop training */ public void setTolerance(double tolerance) { this.tolerance = tolerance; } /** * Returns the minimum tolerance for early stopping. * @return the minimum change in log likelihood to stop training */ public double getTolerance() { return tolerance; } /** * Sets whether or not to perform implicit standardization of the feature * values when performing regularization by the prior. If set on, the input * data will be adjusted to have zero mean and unit variance. This is done * without destroying sparsity. If there is not regularization, this * parameter has no impact. * * @param standardized {@code true} if the input will be standardized, * {@code false} if ti will be left as is. */ public void setStandardized(boolean standardized) { this.standardized = standardized; } /** * Returns whether or not the input is standardized for the priors * @return {@code true} if the input is standardized for the priors */ public boolean isStandardized() { return standardized; } /** * Sets the amount of data points used to form each gradient update. * Increasing the batch size can help convergence. By default, a mini batch * size of 1 is used. 
* * @param miniBatchSize the number of data points used to perform each * update */ public void setMiniBatchSize(int miniBatchSize) { this.miniBatchSize = miniBatchSize; } /** * Returns the number of data points used to perform each gradient update * @return the number of data points used to perform each gradient update */ public int getMiniBatchSize() { return miniBatchSize; } @Override public Vec getRawWeight(int index) { if(index == B.length) return new ConstantVector(0, B[0].length()); else return B[index]; } @Override public double getBias(int index) { if(index == biases.length) return 0; else return biases[index]; } @Override public int numWeightsVecs() { return B.length+1; } @Override public CategoricalResults classify(DataPoint data) { if(B == null) throw new UntrainedModelException("Model has not yet been trained"); final Vec x = data.getNumericalValues(); double[] probs = new double[B.length + 1]; for (int i = 0; i < B.length; i++) probs[i] = x.dot(B[i])+biases[i]; probs[B.length] = 1; MathTricks.softmax(probs, false); return new CategoricalResults(probs); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { final int n = dataSet.size(); final double N = n; final int d = dataSet.getNumNumericalVars(); if(d < 1) throw new FailedToFitException("Data set has no numeric attributes to train on"); B = new Vec[dataSet.getClassSize()-1]; biases = new double[B.length]; for(int i = 0; i < B.length; i++) B[i] = new DenseVector(d); IntList randOrder = new IntList(n); ListUtils.addRange(randOrder, 0, n, 1); Vec means = null, stdDevs = null; if(standardized) { Vec[] ms = dataSet.getColumnMeanVariance(); means = ms[0]; stdDevs = ms[1]; stdDevs.applyFunction(Math::sqrt); //Now transform it so that stdDevs holds standard deviations, and means is the mean / standDev means.pairwiseDivide(stdDevs); stdDevs.applyFunction((x)-> 1/x); } double[] zs = new double[B.length]; /** * Contains the last time each feature was used */ int[] u = new int[d]; /** * Contains the current time. 
*/ int q = 0; double prevLogLike = Double.POSITIVE_INFINITY; //learing rate in use double eta; for(int iter = 0; iter < epochs; iter++) { Collections.shuffle(randOrder); double logLike = 0; eta = learningRateDecay.rate(iter, epochs, initialLearningRate); final double etaReg = regularization*eta; for (int batch = 0; batch < randOrder.size(); batch += miniBatchSize) { int batchCount = Math.min(miniBatchSize, randOrder.size() - batch); double batchFrac = 1.0 / batchCount; for (int k = 0; k < batchCount; k++) { int j = randOrder.get(batch+k); final int c_j = dataSet.getDataPointCategory(j); final Vec x_j = dataSet.getDataPoint(j).getNumericalValues(); //compute softmax for (int i = 0; i < B.length; i++) zs[i] = x_j.dot(B[i]) + biases[i]; MathTricks.softmax(zs, true); //lazy apply lost rounds of regularization if (prior != Prior.UNIFORM) { for (IndexValue iv : x_j) { int i = iv.getIndex(); if(u[i] == 0) continue; double etaRegScaled = etaReg * (u[i] - q) / N; for (Vec b : B) { double bVal = b.get(i); double bNewVal = bVal; if (standardized) bNewVal += etaRegScaled * prior.gradientError(bVal * stdDevs.get(i) - means.get(i), 1, alpha); else bNewVal += etaRegScaled * prior.gradientError(bVal, 1, alpha); if (clipping && signum(bVal) != signum(bNewVal)) b.set(i, 0); else b.set(i, bNewVal); } u[i] = q; } //No need to do bias here, b/c bias is always up to date } for (int c = 0; c < B.length; c++) { Vec b = B[c]; double p_c = zs[c]; double log_pc = log(p_c); if (!Double.isInfinite(log_pc)) logLike += log_pc; double errScaling = (c == c_j ? 1 : 0) - p_c; b.mutableAdd(batchFrac*eta * errScaling, x_j); if (useBias) biases[c] += batchFrac*eta * errScaling + etaReg * prior.gradientError(biases[c] - 1, 1, alpha); } } q++; } logLike *= -1; if (prior != Prior.UNIFORM) { for (int i = 0; i < d; i++) { if (u[i] - q == 0) { for (Vec b : B) if (standardized) logLike += regularization*prior.logProb(b.get(i) * stdDevs.get(i) - means.get(i), 1, alpha); else logLike += regularization*prior.logProb(b.get(i), 1, alpha); continue; } double etaRegScaled = etaReg * (u[i] - q) / N; for (Vec b : B) { double bVal = b.get(i); if (bVal == 0.0) continue; double bNewVal = bVal; if (standardized) bNewVal += etaRegScaled * prior.gradientError(bVal * stdDevs.get(i) - means.get(i), 1, alpha); else bNewVal += etaRegScaled * prior.gradientError(bVal, 1, alpha); if (clipping && signum(bVal) != signum(bNewVal)) b.set(i, 0); else b.set(i, bNewVal); if(standardized) logLike += regularization*prior.logProb(b.get(i) * stdDevs.get(i) - means.get(i), 1, alpha); else logLike += regularization*prior.logProb(b.get(i), 1, alpha); } u[i] = q; } } double dif = abs(prevLogLike-logLike)/(abs(prevLogLike)+abs(logLike)); if(dif < tolerance) break; else prevLogLike = logLike; } } @Override public boolean supportsWeightedData() { return false; } /** * Returns the raw coefficient vector used without the bias term. For a * multinomial Logistic model, there are C-1 coefficient vectors. C is the * number of output classes. Altering the returned vector will alter the * model. The i'th index of the vector corresponds to the weight therm for * the i'th index in an input. * * @param id which coefficient vector to obtain * @return the vector of variable coefficients. */ public Vec getCoefficientVector(int id) { return B[id]; } @Override public StochasticMultinomialLogisticRegression clone() { return new StochasticMultinomialLogisticRegression(this); } }
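/*
 * Hypothetical usage sketch (not part of the original JSAT file above): shows how the
 * mini-batch, regularization, and standardization setters documented above might be
 * combined before training. The no-argument constructor and the externally supplied
 * ClassificationDataSet are assumptions made for illustration, and the sketch is assumed
 * to sit in the same package as the class above (package statement omitted).
 */
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;

class SMLRUsageSketch
{
    static CategoricalResults fitAndPredict(ClassificationDataSet trainData)
    {
        StochasticMultinomialLogisticRegression model = new StochasticMultinomialLogisticRegression();
        model.setMiniBatchSize(10);     // average each gradient step over 10 data points
        model.setRegularization(1e-4);  // small penalty coefficient; 0 disables regularization
        model.setStandardized(true);    // implicit zero-mean / unit-variance scaling for the prior
        model.train(trainData);
        return model.classify(trainData.getDataPoint(0));
    }
}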
24468
31.844295
130
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/StochasticSTLinearL1.java
package jsat.classifiers.linear; import java.util.Iterator; import java.util.List; import jsat.SingleWeightVectorModel; import jsat.classifiers.CategoricalResults; import jsat.classifiers.Classifier; import jsat.exceptions.FailedToFitException; import jsat.linear.IndexValue; import jsat.linear.Vec; import jsat.lossfunctions.LogisticLoss; import jsat.lossfunctions.SquaredLoss; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.regression.Regressor; /** * This base class provides shared functionality and variables used by two * different training algorithms for L<sub>1</sub> regularized linear models. * Both squared and log loss are supported, making the results equivalent to * LASSO regression and Logistic regression respectively. <br> * <br> * These algorithms requires all feature values to be in the range * [-1, 1]. The implementation can do implicit rescaling, but rescaling may * destroy sparsity. If the data set is sparse and all values are zero or * positive use the default [0,1] rescaling to perform efficient rescaling that * will not destroy sparsity. <br> * <br> * Both algorithms are from: <br> * <a href="http://eprints.pascal-network.org/archive/00005418/">Shalev-Shwartz, * S.,&amp;Tewari, A. (2009). <i>Stochastic Methods for L<sub>1</sub>-regularized * Loss Minimization</i>. 26th International Conference on Machine Learning * (Vol. 12, pp. 929–936).</a> * * @author Edward Raff */ public abstract class StochasticSTLinearL1 implements Classifier, Regressor, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = -6761456665014802608L; /** * The number of training iterations */ protected int epochs; /** * The regularization penalty */ protected double lambda; /** * The loss function to use */ protected Loss loss; /** * The final weight vector */ protected Vec w; /** * The bias term to add */ protected double bias; /** * The minimum observed value for each feature */ protected double[] obvMin; /** * The maximum observed value for each feature */ protected double[] obvMax; /** * Whether or not to perform feature rescaling */ protected boolean reScale; /** * The scaled minimum */ protected double minScaled = 0; /** * The scaled maximum */ protected double maxScaled = 1; public static final int DEFAULT_EPOCHS = 1000; public static final double DEFAULT_REG = 1e-14; public static final Loss DEFAULT_LOSS = Loss.SQUARED; @Override abstract public StochasticSTLinearL1 clone(); public static enum Loss { SQUARED { @Override public double loss(double a, double y) { return SquaredLoss.loss(a, y); } @Override public double deriv(double a, double y) { return SquaredLoss.deriv(a, y); } @Override public double beta() { return 1; } @Override public CategoricalResults classify(double a) { CategoricalResults cr = new CategoricalResults(2); a = (a+1)/2; if(a > 1) a = 1; else if(a < 0) a = 0; cr.setProb(1, a); cr.setProb(0, 1-a); return cr; } @Override public double regress(double a) { return a; } }, LOG { @Override public double loss(double a, double y) { return LogisticLoss.loss(a, y); } @Override public double deriv(double a, double y) { return LogisticLoss.deriv(a, y); } @Override public double beta() { return 1.0/4.0; } @Override public CategoricalResults classify(double a) { return LogisticLoss.classify(a); } @Override public double regress(double a) { return 1/(1+Math.exp(-a)); } }; /** * Returns the loss on the prediction * @param a the predicted value * @param y the target value * @return the loss */ abstract public double loss(double 
a, double y); /** * Returns the value of the derivative of the loss function * @param a the predicted value * @param y the target value * @return the derivative of the loss */ abstract public double deriv(double a, double y); /** * Returns an upper bound on the 2nd derivative for classification * @return an upper bound on the 2nd derivative for classification */ abstract public double beta(); /** * The categorical results for a classification problem * @param a the dot product of the weight vector and an input * @return the binary problem classification results */ abstract public CategoricalResults classify(double a); /** * The output value result for a regression problem * @param a the dot product of the weight vector and an input * @return the final regression output */ abstract public double regress(double a); } /** * Sets the number of iterations of training that will be performed. * @param epochs the number of iterations */ public void setEpochs(int epochs) { if(epochs < 1) throw new ArithmeticException("A positive amount of iterations must be performed"); this.epochs = epochs; } /** * Returns the number of iterations of updating that will be done * @return the number of iterations */ public double getEpochs() { return epochs; } /** * Sets the maximum value of any feature after scaling is applied. This * value can be no greater than 1. * @param maxFeature the maximum feature value after scaling */ public void setMaxScaled(double maxFeature) { if(Double.isNaN(maxFeature)) throw new ArithmeticException("NaN is not a valid feature value"); else if(maxFeature > 1) throw new ArithmeticException("Maximum possible feature value is 1, can not use " + maxFeature); else if(maxFeature <= minScaled) throw new ArithmeticException("Maximum feature value must be learger than the minimum"); this.maxScaled = maxFeature; } /** * Returns the maximum feature value after scaling * @return the maximum feature value after scaling */ public double getMaxScaled() { return maxScaled; } /** * Sets the minimum value of any feature after scaling is applied. This * value can be no smaller than -1 * @param minFeature the minimum feature value after scaling */ public void setMinScaled(double minFeature) { if(Double.isNaN(minFeature)) throw new ArithmeticException("NaN is not a valid feature value"); else if(minFeature < -1) throw new ArithmeticException("Minimum possible feature value is -1, can not use " + minFeature); else if(minFeature >= maxScaled) throw new ArithmeticException("Minimum feature value must be smaller than the maximum"); this.minScaled = minFeature; } /** * Returns the minimum feature value after scaling * @return the minimum feature value after scaling */ public double getMinScaled() { return minScaled; } /** * Sets the regularization constant used for learning. The regularization * must be positive, and the learning rate is proportional to the * regularization value. This means regularizations very near zero will * take a long time to converge. * * @param lambda the regularization to apply */ public void setLambda(double lambda) { if(Double.isInfinite(lambda) || Double.isNaN(lambda) || lambda <= 0) throw new ArithmeticException("A positive amount of regularization must be performed"); this.lambda = lambda; } /** * Returns the amount of regularization to used in training * @return the regularization parameter. */ public double getLambda() { return lambda; } /** * Sets the loss function to use. This should not be altered after training * unless the leaner is going to be trained again. 
* @param loss the loss function to use */ public void setLoss(Loss loss) { this.loss = loss; } /** * returns the loss function in use * @return the loss function in use */ public Loss getLoss() { return loss; } /** * Sets whether or not scaling should be applied on th feature values of the * training vectors. Scaling should be used intelligently, scaling can * destroy sparsity in the data set. If scaling is not applied, and a value * is not in the range [-1, 1], a {@link FailedToFitException} could occur. * <br> Rescaling does not alter the data points passed in. * @param reScale whether or not to rescale feature values */ public void setReScale(boolean reScale) { this.reScale = reScale; } /** * Returns if scaling is in use * @return <tt>true</tt> if feature values are rescaled during training. */ public boolean isReScale() { return reScale; } /** * Computes {@link #w}.{@link Vec#dot(jsat.linear.Vec) }<tt>x</tt> and does * so by rescaling <tt>x</tt> as needed automatically and efficiently, even * if <tt>x</tt> is sparse. * @param x the value to compute the dot product with * @return the dot produce of w and x with the bias term */ protected double wDot(Vec x) { double a; if (reScale) { a = bias; if(!w.isSparse())//w is dense, jsut iterate over x { for(IndexValue iv : x) { int j = iv.getIndex(); double xV = iv.getValue() - obvMin[j]; xV *= (maxScaled - minScaled) / (obvMax[j] - obvMin[j]); xV += minScaled; a += w.get(j)*xV; } return a; } //Compute the dot and rescale w/o extra spacein a sprase freindly way Iterator<IndexValue> wIter = w.getNonZeroIterator(); Iterator<IndexValue> xIter = x.getNonZeroIterator(); if(!wIter.hasNext() || !xIter.hasNext()) return a; IndexValue wIV = wIter.next(); IndexValue xIV = xIter.next(); do { if (wIV.getIndex() == xIV.getIndex()) { int j = xIV.getIndex(); double xV = xIV.getValue() - obvMin[j]; xV *= (maxScaled - minScaled) / (obvMax[j] - obvMin[j]); xV += minScaled; //Scaled, now add to result a += wIV.getValue() * xV; if (!wIter.hasNext() || !xIter.hasNext()) break; wIV = wIter.next(); xIV = xIter.next(); } else if (wIV.getIndex() < xIV.getIndex()) if (wIter.hasNext()) wIV = wIter.next(); else break; else if (wIV.getIndex() > xIV.getIndex()) if (xIter.hasNext()) xIV = xIter.next(); else break; } while (wIV != null && xIV != null); } else a = w.dot(x) + bias; return a; } /** * Returns the weight vector used to compute results via a dot product. <br> * Do not modify this value, or you will alter the results returned. * @return the learned weight vector for prediction */ public Vec getWRaw() { return w; } /** * Returns a copy of the weight vector used to compute results via a dot * product. * @return a copy of the learned weight vector for prediction */ public Vec getW() { if(w == null) return w; else return w.clone(); } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return bias; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } }
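/*
 * Hypothetical illustration (not part of the original JSAT file above): restates the
 * per-feature rescaling performed inside wDot(), which maps an observed value from
 * [obvMin, obvMax] onto the configured [minScaled, maxScaled] range. All names here are
 * local to this sketch.
 */
class STLinearRescaleSketch
{
    /** Linearly maps v from [obvMin, obvMax] onto [minScaled, maxScaled]. */
    static double rescale(double v, double obvMin, double obvMax, double minScaled, double maxScaled)
    {
        double xV = v - obvMin;
        xV *= (maxScaled - minScaled) / (obvMax - obvMin);
        return xV + minScaled;
    }

    public static void main(String[] args)
    {
        // With the default [0, 1] target range, an observed 5.0 in [0, 10] maps to 0.5
        System.out.println(rescale(5.0, 0.0, 10.0, 0.0, 1.0));
    }
}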
13919
28.55414
115
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/kernelized/ALMA2K.java
package jsat.classifiers.linear.kernelized; import java.util.*; import jsat.DataSet; import jsat.classifiers.*; import jsat.classifiers.linear.ALMA2; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.Distribution; import jsat.distributions.Uniform; import jsat.distributions.kernels.KernelTrick; import jsat.exceptions.FailedToFitException; import jsat.linear.ScaledVector; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameter.ParameterHolder; import jsat.parameters.Parameterized; import jsat.utils.DoubleList; import jsat.utils.IntList; /** * Provides a kernelized version of the {@link ALMA2} algorithm. It is important * to note that the number of "support vectors" ALMA may learn is unbounded. * <br> * The averaged output of all previous hyperplanes is supported at almost no * overhead, and can be turned on by setting {@link #setAveraged(boolean) }. * This information is always collected, and the output can be changed once * already learned. * <br><br> * See: Gentile, C. (2002). <i>A New Approximate Maximal Margin Classification * Algorithm</i>. The Journal of Machine Learning Research, 2, 213–242. * Retrieved from <a href="http://dl.acm.org/citation.cfm?id=944811">here</a> * * @author Edward Raff */ public class ALMA2K extends BaseUpdateableClassifier implements BinaryScoreClassifier, Parameterized { private static final long serialVersionUID = 7247320234799227009L; private static final double p = 2; private double alpha; private double B; private double C = Math.sqrt(2); private int k; private int curRounds; @ParameterHolder private KernelTrick K; private List<Vec> supports; private List<Double> signedEtas; private List<Double> associatedScores; private List<Double> normalizers; private List<Integer> rounds; private boolean averaged = false; /** * Creates a new kernelized ALMA2 object * @param kernel the kernel function to use * @param alpha the alpha parameter of ALMA */ public ALMA2K(KernelTrick kernel, double alpha) { setKernelTrick(kernel); setAlpha(alpha); } /** * Copy constructor * @param other the ALMA2K object to copy */ protected ALMA2K(ALMA2K other) { this.alpha = other.alpha; this.B = other.B; this.C = other.C; this.k = other.k; this.K = other.K.clone(); this.averaged = other.averaged; if(other.supports != null) { this.supports = new ArrayList<Vec>(other.supports.size()); for(Vec v : other.supports) this.supports.add(v.clone()); this.signedEtas = new DoubleList(other.signedEtas); this.associatedScores = new DoubleList(other.associatedScores); this.normalizers = new DoubleList(other.normalizers); this.rounds = new IntList(other.rounds); } } @Override public ALMA2K clone() { return new ALMA2K(this); } /** * ALMA2K supports taking the averaged output of all previous hypothesis * weighted by the number of successful uses of the hypothesis during * training. This effectively reduces the variance of the classifier. It has * no impact on the training / update phase, only the classification results * are impacted. * <br><br> * Unlike most algorithms, this can be changed at any time without issue - * even after the algorithm has been trained the type of output (averaged or * last) can be switched on the fly. 
* @param averaged {@code true} to use the averaged out, {@code false} to * only use the last hypothesis */ public void setAveraged(boolean averaged) { this.averaged = averaged; } /** * Returns whether or not the averaged or last hypothesis is used * @return whether or not the averaged or last hypothesis is used */ public boolean isAveraged() { return averaged; } /** * Sets the kernel to use * @param K the kernel to use */ public void setKernelTrick(KernelTrick K) { this.K = K; } /** * Returns the kernel in use * @return the kernel in use */ public KernelTrick getKernelTrick() { return K; } /** * Alpha controls the approximation of the large margin formed by ALMA, * with larger values causing more updates. A value of 1.0 will update only * on mistakes, while smaller values update if the error was not far enough * away from the margin. * <br><br> * NOTE: Whenever alpha is set, the value of {@link #setB(double) B} will * also be set to an appropriate value. This is not the only possible value * that will lead to convergence, and can be set manually after alpha is set * to another value. * * @param alpha the approximation scale in (0.0, 1.0] */ public void setAlpha(double alpha) { if(alpha <= 0 || alpha > 1 || Double.isNaN(alpha)) throw new ArithmeticException("alpha must be in (0, 1], not " + alpha); this.alpha = alpha; setB(1.0/alpha); } /** * Returns the approximation coefficient used * @return the approximation coefficient used */ public double getAlpha() { return alpha; } /** * Sets the B variable of the ALMA algorithm, this is set automatically by * {@link #setAlpha(double) }. * @param B the value for B */ public void setB(double B) { this.B = B; } /** * Returns the B value of the ALMA algorithm * @return the B value of the ALMA algorithm */ public double getB() { return B; } /** * Sets the C value of the ALMA algorithm. The default value is the one * suggested in the paper. * @param C the C value of ALMA */ public void setC(double C) { if(C <= 0 || Double.isInfinite(C) || Double.isNaN(C)) throw new ArithmeticException("C must be a posative cosntant"); this.C = C; } public double getC() { return C; } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(numericAttributes <= 0) throw new FailedToFitException("ALMA2 requires numeric features"); if(predicting.getNumOfCategories() != 2) throw new FailedToFitException("ALMA2 works only for binary classification"); supports = new ArrayList<Vec>(); signedEtas = new DoubleList(); associatedScores = new DoubleList(); normalizers = new DoubleList(); rounds = new IntList(); k = 1; curRounds = 0; } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { final Vec x_t = dataPoint.getNumericalValues(); final double y_t = targetClass*2-1; double gamma = B * Math.sqrt(p-1) / k; double wx = score(x_t, false); if(y_t*wx <= (1-alpha)*gamma)//update { double eta = C/Math.sqrt(p-1)/Math.sqrt(k++); double norm = Math.sqrt(K.eval(x_t, x_t)); associatedScores.add(score(new ScaledVector(1/norm, x_t), false)); supports.add(x_t); normalizers.add(norm); signedEtas.add(eta*y_t); rounds.add(curRounds); curRounds = 0; } else curRounds++; } /** * Computes the output of the summations of the input vector with the * current weight vector as a recursive linear combination of all previous * support vectors and their associated score values. <br> * See Remark 5 in the original paper. 
* @param x the input vector to compute the score value * @return the score for the input indicating which side of the hyperplane * it is on */ private double score(Vec x, boolean averaged) { /* * Score for the current dot procut with the weight vector, denom for * the current normalizing constant. */ double score = 0; double denom = 0; double finalScore = 0; for(int i = 0; i < supports.size(); i++) { double eta_s = signedEtas.get(i); double tmp = eta_s*K.eval(supports.get(i), x)/normalizers.get(i); double denom_tmp = 2*eta_s*associatedScores.get(i)+eta_s*eta_s; denom += denom/Math.max(1, denom)+ denom_tmp; score += tmp/Math.max(1, denom); if(averaged) finalScore += score*rounds.get(i); } if(averaged) return finalScore; else return score; } @Override public CategoricalResults classify(DataPoint data) { double wx = getScore(data); CategoricalResults cr =new CategoricalResults(2); if(wx < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { return score(dp.getNumericalValues(), averaged); } @Override public boolean supportsWeightedData() { return false; } /** * Guesses the distribution to use for the &alpha; parameter * * @param d the dataset to get the guess for * @return the guess for the &alpha; parameter * @see #setAlpha(double) */ public static Distribution guessAlpha(DataSet d) { return new Uniform(1e-3, 1.0); } }
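/*
 * Hypothetical usage sketch (not part of the original JSAT file above): trains the
 * kernelized ALMA2 learner with the averaged-hypothesis output enabled. The RBFKernel
 * constructor and the externally supplied two-class ClassificationDataSet are
 * assumptions for illustration; train(...) is inherited from BaseUpdateableClassifier,
 * and the sketch is assumed to sit in the same package as ALMA2K.
 */
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.distributions.kernels.RBFKernel;

class ALMA2KUsageSketch
{
    static CategoricalResults fitAndPredict(ClassificationDataSet binaryData)
    {
        ALMA2K alma = new ALMA2K(new RBFKernel(0.5), 0.8); // alpha in (0, 1]; smaller values update more often
        alma.setAveraged(true);  // averaged output can be toggled before or after training
        alma.train(binaryData);  // requires numeric features and exactly two classes
        return alma.classify(binaryData.getDataPoint(0));
    }
}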
9841
29.660436
113
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/kernelized/BOGD.java
package jsat.classifiers.linear.kernelized; import java.util.*; import jsat.DataSet; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.distributions.kernels.KernelTrick; import jsat.linear.Vec; import jsat.lossfunctions.HingeLoss; import jsat.lossfunctions.LossC; import jsat.parameters.Parameter; import jsat.parameters.Parameter.ParameterHolder; import jsat.parameters.Parameterized; import jsat.utils.DoubleList; import jsat.utils.random.RandomUtil; import jsat.utils.random.XORWOW; /** * Bounded Online Gradient Descent (BOGD) is a kernel learning algorithm that * uses a bounded number of support vectors. Once the maximum number of support * vectors is reached, old vectors are dropped either in a uniform random * fashion, or weighted by the kernel function and the current coefficient for * the vector. The later is the default method and is referred to as BOGD++.<br> * <br> * See: Zhao, P., Wang, J., Wu, P., Jin, R.,&amp;Hoi, S. C. H. (2012). <i>Fast * Bounded Online Gradient Descent Algorithms for Scalable Kernel-Based Online * Learning</i>. In Proceedings of the 29th International Conference on Machine * Learning (pp. 169–176). Learning; Machine Learning. Retrieved from * <a href="http://arxiv.org/abs/1206.4633">here</a> * @author Edward Raff */ public class BOGD extends BaseUpdateableClassifier implements BinaryScoreClassifier, Parameterized { private static final long serialVersionUID = -3547832514098781996L; @ParameterHolder private KernelTrick k; private int budget; private double eta; private double reg; private double maxCoeff; private LossC lossC; private boolean uniformSampling; private Random rand; private List<Vec> vecs; /** * Stores the sqrt of each support vector's kernel product with itself */ private List<Double> selfK; private DoubleList alphas; private List<Double> accelCache; /** * Cache of values used for BOGD++ sampling */ private double[] dist; /** * Creates a new BOGD++ learner using the {@link HingeLoss} * @param k the kernel trick to use * @param budget the budget for support vectors to allow * @param eta the learning rate to use * @param reg the regularization parameter * @param maxCoeff the maximum support vector coefficient to allow */ public BOGD(KernelTrick k, int budget, double eta, double reg, double maxCoeff) { this(k, budget, eta, reg, maxCoeff, new HingeLoss()); } /** * Creates a new BOGD++ learner * @param k the kernel trick to use * @param budget the budget for support vectors to allow * @param eta the learning rate to use * @param reg the regularization parameter * @param maxCoeff the maximum support vector coefficient to allow * @param lossC the loss function to use */ public BOGD(KernelTrick k, int budget, double eta, double reg, double maxCoeff, LossC lossC) { setKernel(k); setBudget(budget); setEta(eta); setRegularization(reg); setMaxCoeff(maxCoeff); this.lossC = lossC; setUniformSampling(false); } /** * Copy constructor * @param toCopy the object to make a copy of */ public BOGD(BOGD toCopy) { this.k = toCopy.k.clone(); this.budget = toCopy.budget; this.eta = toCopy.eta; this.reg = toCopy.reg; this.maxCoeff = toCopy.maxCoeff; this.lossC = toCopy.lossC.clone(); this.uniformSampling = toCopy.uniformSampling; this.rand = RandomUtil.getRandom(); if(toCopy.vecs != null) { this.vecs = new ArrayList<Vec>(budget); for(Vec v : toCopy.vecs) this.vecs.add(v.clone()); this.selfK = new DoubleList(toCopy.selfK); this.alphas = new 
DoubleList(toCopy.alphas); } if(toCopy.accelCache != null) this.accelCache = new DoubleList(toCopy.accelCache); if(toCopy.dist != null) this.dist = Arrays.copyOf(toCopy.dist, toCopy.dist.length); } /** * Sets the regularization parameter used for training. The original paper * suggests values in the range 2<sup>x</sup>/T<sup>2</sup> for <i>x</i> * &isin; {-3, -2, -1, 0, 1, 2, 3} where <i>T</i> is the number of data * instances that will be trained on * * @param regularization the positive regularization parameter to use. */ public void setRegularization(double regularization) { if(regularization <= 0 || Double.isNaN(regularization) || Double.isInfinite(regularization)) throw new IllegalArgumentException("Regularization must be positive, not " + regularization); this.reg = regularization; } /** * Returns the regularization parameter used * @return the regularization parameter used */ public double getRegularization() { return reg; } /** * Sets the learning rate to use for training. The original paper suggests * values in the range 2<sup>x</sup> for <i>x</i> * &isin; {-3, -2, -1, 0, 1, 2, 3} * @param eta the positive learning rate to use */ public void setEta(double eta) { if(eta <= 0 || Double.isNaN(eta) || Double.isInfinite(eta)) throw new IllegalArgumentException("Eta must be positive, not " + eta); this.eta = eta; } /** * Returns the learning rate in use * @return the learning rate in use */ public double getEta() { return eta; } /** * Sets the maximum allowed value for any support vector allowed. The * original paper suggests values in the range 2<sup>x</sup> for <i>x</i> * &isin; {0, 1, 2, 3, 4} * @param maxCoeff the maximum value for any support vector */ public void setMaxCoeff(double maxCoeff) { if(maxCoeff <= 0 || Double.isNaN(maxCoeff) || Double.isInfinite(maxCoeff)) throw new IllegalArgumentException("MaxCoeff must be positive, not " + maxCoeff); this.maxCoeff = maxCoeff; } /** * Returns the maximum allowed value for any support vector * @return the maximum allowed value for any support vector */ public double getMaxCoeff() { return maxCoeff; } /** * Sets the budget for support vectors * @param budget the allowed budget for support vectors */ public void setBudget(int budget) { if(budget <= 0 ) throw new IllegalArgumentException("Budget must be positive, not " + budget); this.budget = budget; } /** * Returns the maximum number of allowed support vectors * @return the maximum number of allowed support vectors */ public int getBudget() { return budget; } /** * Sets the kernel to use * @param k the kernel to use */ public void setKernel(KernelTrick k) { this.k = k; } /** * Returns the kernel to use * @return the kernel to use */ public KernelTrick getKernel() { return k; } /** * Sets whether or not support vectors should be removed by uniform sampling * or not. The default is {@code false}, which corresponds to BOGD++. * @param uniformSampling {@code true} to use uniform sampling, * {@code false} otherwise. 
*/ public void setUniformSampling(boolean uniformSampling) { this.uniformSampling = uniformSampling; } /** * Returns {@code true } is uniform sampling is in use, or {@code false} if * the BOGD++ sampling procedure is in use * @return {@code true } is uniform sampling is in use, or {@code false} if * the BOGD++ sampling procedure is in use */ public boolean isUniformSampling() { return uniformSampling; } @Override public BOGD clone() { return new BOGD(this); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { vecs = new ArrayList<Vec>(budget); alphas = new DoubleList(budget); selfK = new DoubleList(budget); if(k.supportsAcceleration()) accelCache = new DoubleList(budget); else accelCache = null; if(!uniformSampling) dist = new double[budget]; rand = RandomUtil.getRandom(); } @Override public double getScore(DataPoint dp) { Vec x = dp.getNumericalValues(); return score(x, k.getQueryInfo(x)); } private double score(Vec x, List<Double> qi) { return k.evalSum(vecs, accelCache, alphas.getBackingArray(), x, qi, 0, alphas.size()); } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { final Vec x_t = dataPoint.getNumericalValues(); final double y_t = targetClass*2-1; final List<Double> qi = k.getQueryInfo(x_t); final double score = score(x_t, qi); final double lossD = lossC.getDeriv(score, y_t); if(lossD == 0) { alphas.getVecView().mutableMultiply(1-eta*reg); } else { if(vecs.size() < budget) { alphas.getVecView().mutableMultiply(1-eta*reg); alphas.add(-eta*lossD); selfK.add(Math.sqrt(k.eval(0, 0, Arrays.asList(x_t), qi))); if(k.supportsAcceleration()) accelCache.addAll(qi); vecs.add(x_t); } else//budget maintinance { final int toRemove; final double normalize; if(uniformSampling) { toRemove = rand.nextInt(budget); normalize = 1; } else { double s = 0; for(int i = 0; i < budget; i++) s += Math.abs(alphas.get(i))*selfK.get(i); s = (budget-1)/s; final double target = rand.nextDouble(); double cur = 0; int i = -1; while(cur < target) { i++; cur += dist[i] = 1-s*alphas.get(i)*selfK.get(i); } toRemove = i++; while(i < budget) cur += dist[i] = 1-s*alphas.get(i)*selfK.get(i++); normalize = cur; } for(int i = 0; i < budget; i++) { if(i == toRemove) continue; double alpha_i = alphas.getD(i); double sign = Math.signum(alpha_i); alpha_i = Math.abs(alpha_i); double tmp = uniformSampling ? 
1.0/budget : dist[i]/normalize; alphas.set(i, sign*Math.min((1-reg*eta)/(1-tmp)*alpha_i, maxCoeff*eta)); } //Remove old point if(k.supportsAcceleration()) { int catToRet = accelCache.size()/budget; for(int i = 0; i < catToRet; i++) accelCache.remove(toRemove*catToRet); } alphas.remove(toRemove); vecs.remove(toRemove); selfK.remove(toRemove); //Add new point alphas.add(-eta*lossD); selfK.add(Math.sqrt(k.eval(0, 0, Arrays.asList(x_t), qi))); accelCache.addAll(qi); vecs.add(x_t); } } } @Override public CategoricalResults classify(DataPoint data) { Vec x = data.getNumericalValues(); return lossC.getClassification(score(x, k.getQueryInfo(x))); } @Override public boolean supportsWeightedData() { return false; } /** * Guesses the distribution to use for the Regularization parameter * * @param d the dataset to get the guess for * @return the guess for the Regularization parameter * @see #setRegularization(double) */ public static Distribution guessRegularization(DataSet d) { double T2 = d.size(); T2*=T2; return new LogUniform(Math.pow(2, -3)/T2, Math.pow(2, 3)/T2); } /** * Guesses the distribution to use for the &eta; parameter * * @param d the dataset to get the guess for * @return the guess for the &eta; parameter * @see #setEta(double) */ public static Distribution guessEta(DataSet d) { return new LogUniform(Math.pow(2, -3), Math.pow(2, 3)); } /** * Guesses the distribution to use for the MaxCoeff parameter * * @param d the dataset to get the guess for * @return the guess for the MaxCoeff parameter * @see #setMaxCoeff(double) (double) */ public static Distribution guessMaxCoeff(DataSet d) { return new LogUniform(Math.pow(2, 0), Math.pow(2, 4)); } }
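/*
 * Hypothetical usage sketch (not part of the original JSAT file above): builds a
 * budgeted BOGD++ learner. Parameter values follow the ranges suggested by the class's
 * own guessRegularization / guessEta / guessMaxCoeff methods. The RBFKernel constructor
 * and the externally supplied two-class ClassificationDataSet are assumptions;
 * train(...) is inherited from BaseUpdateableClassifier, and the sketch is assumed to
 * sit in the same package as BOGD.
 */
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.distributions.kernels.RBFKernel;

class BOGDUsageSketch
{
    static CategoricalResults fitAndPredict(ClassificationDataSet binaryData)
    {
        double n = binaryData.size();
        BOGD bogd = new BOGD(new RBFKernel(0.5), 200,   // keep at most 200 support vectors
                             0.5,                       // learning rate eta
                             1.0 / (n * n),             // regularization on the order of 2^x / T^2
                             4.0);                      // maximum coefficient per support vector
        bogd.setUniformSampling(false);                 // false = BOGD++ weighted removal (the default)
        bogd.train(binaryData);
        return bogd.classify(binaryData.getDataPoint(0));
    }
}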
13521
31.195238
113
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/kernelized/CSKLR.java
package jsat.classifiers.linear.kernelized; import java.util.ArrayList; import java.util.List; import java.util.Random; import jsat.classifiers.BaseUpdateableClassifier; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.DataPoint; import jsat.distributions.kernels.KernelTrick; import jsat.linear.Vec; import jsat.parameters.Parameterized; import jsat.utils.DoubleList; import static java.lang.Math.*; import jsat.DataSet; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.exceptions.FailedToFitException; import jsat.utils.random.RandomUtil; /** * An implementation of Conservative Stochastic Kernel Logistic Regression. This * is an online algorithm that obtains sparse solutions by conservatively * rejecting updates based on a binomial distribution of the error on each * update. <br><br> * This algorithm works best on data sets with a very high number of samples * where a high accuracy is obtainable using a kernel model. It is often the * case that this model produces accurate results, but has a low confidence due * to the conservative updating. This can be counteracted by having a very large * number of features, but that often increases the size of the model. * <br><br> * It is important to read the documentation and test some different values for * the {@link #setEta(double) learning rate} and {@link #setGamma(double) gamma} * variables. They behave different compared to many algorithms. * <br><br> * It is possible to obtain a more confident model and a slightly larger model * by using several epochs. Instead of using this class, the * {@link CSKLRBatch batch version} of this algorithm should be used instead. * <br><br> * See paper: <br> * Zhang, L., Jin, R., Chen, C., Bu, J.,&amp;He, X. (2012). <i>Efficient Online * Learning for Large-Scale Sparse Kernel Logistic Regression</i>. Twenty-Sixth * AAAI Conference on Artificial Intelligence (pp. 1219–1225). Retrieved from * <a href="http://www.aaai.org/ocs/index.php/AAAI/AAAI12/paper/viewPDFInterstitial/5003/5544">here</a> * * @author Edward Raff */ public class CSKLR extends BaseUpdateableClassifier implements Parameterized { private static final long serialVersionUID = 2325605193408720811L; private double eta; private DoubleList alpha; private List<Vec> vecs; private double curNorm; private KernelTrick k; private double R; private Random rand; private UpdateMode mode; private double gamma = 2; private List<Double> accelCache; /** * Creates a new CSKLR object * @param eta the learning rate to use * @param k the kernel trick to use * @param R the maximal norm of the surface * @param mode the mode to use */ public CSKLR(double eta, KernelTrick k, double R, UpdateMode mode) { setEta(eta); setKernel(k); setR(R); setMode(mode); } /** * Guesses the distribution to use for the R parameter * * @param d the dataset to get the guess for * @return the guess for the R parameter * @see #setR(double) */ public static Distribution guessR(DataSet d) { return new LogUniform(1, 1e5); } /** * Controls when updates are performed on the model. Depending on which * update model is used, the acceptable values and behaviors of * {@link #setEta(double) } and {@link #setGamma(double) } may change. * <br><br> * The "auxiliary" modes perform updates with a probability of * <i>log(1+e<sup>-z</sup>) / a(z)</i>, where <i>a(z)</i> is the auxiliary * function and <i>z</i> is the raw margin value times the class label. 
*/ public enum UpdateMode { /** * NC stands for Non-Conservative, this mode will perform a model update * on every new input, creating a very dense model. The * {@link #setEta(double) learning rate} may take on any positive value, * and {@link #setGamma(double) } is not used. */ NC { @Override protected double pt(double y, double score, double preScore, double eta, double gamma) { return 1; } @Override protected double grad(double y, double score, double preScore, double gamma) { return score-1; } }, /** * Performs model updates probabilistically based on their distance from * the margin of the classifier. In this case, {@link #setEta(double) } * should be less than 2, or the model will become dense. * {@link #setGamma(double) } is not used. */ MARGIN { @Override protected double pt(double y, double score, double preScore, double eta, double gamma) { return (2-eta)/(2-eta+eta*score); } @Override protected double grad(double y, double score, double preScore, double gamma) { return score-1; } }, /** * Performs model updates based on a "auxiliary" function * <i>a(z) = log(&gamma; + e<sup>-z</sup>)</i>. * {@link #setGamma(double) gamma} should be in the range (1, Infinity) * where larger values increase the sparsity of the model * <br><br> * This is the main auxiliary method used by the authors. They use * values for &gamma; in the range of <i>1+10<sup>&plusmn; * x</sup></i> &forall; x &isin; {0, 1, 2, 3, 4} */ AUXILIARY_1 { @Override protected double pt(double y, double score, double preScore, double eta, double gamma) { double z = y*preScore; return log(1+exp(-z))/log(gamma+exp(-z)); } @Override protected double grad(double y, double score, double preScore, double gamma) { double z = y*preScore; return -1/(1+gamma*exp(z)); } }, /** * Performs model updates based on a "auxiliary" function * <i>a(z) = log(1 + &gamma; e<sup>-z</sup>)</i>. * {@link #setGamma(double) gamma} should be in the range (1, Infinity) * where larger values increase the sparsity of the model */ AUXILIARY_2 { @Override protected double pt(double y, double score, double preScore, double eta, double gamma) { double z = y*preScore; return log(1+exp(-z))/log(1+gamma*exp(-z)); } @Override protected double grad(double y, double score, double preScore, double gamma) { double z = y*preScore; return -gamma/(gamma+exp(z)); } }, /** * Performs model updates based on a "auxiliary" function * <i>a(z) = max(loss(z), loss(&gamma;)</i>. 
* {@link #setGamma(double) gamma} should be in the range (0, Infinity) * where smaller values increase the sparsity of the model */ AUXILIARY_3 { @Override protected double pt(double y, double score, double preScore, double eta, double gamma) { double z = y*preScore; return log(1+exp(-z))/log(1+exp(-gamma)); } @Override protected double grad(double y, double score, double preScore, double gamma) { return score-1; } }; /** * Returns the Bernoulli trial probability variable * @param y the sign of the input point * @param score the logistic regression score for the input * @param preScore the raw margin before the final * @param eta the learning rate * @param gamma the gamma variable * @return the Bernoulli trial probability variable */ abstract protected double pt(double y, double score, double preScore, double eta, double gamma); /** * Get the gradient value that should be applied based on the input * variable from the current model * @param y the sign of the input point * @param score the logistic regression score for the input * @param preScore the raw margin before the final * @param gamma the gamma variable * @return the coefficient to apply to the stochastic update */ abstract protected double grad(double y, double score, double preScore, double gamma); } /** * Copy constructor * @param toClone the object to copy */ protected CSKLR(CSKLR toClone) { if(toClone.alpha != null) this.alpha = new DoubleList(toClone.alpha); if(toClone.vecs != null) { this.vecs = new ArrayList<Vec>(toClone.vecs); } this.curNorm = toClone.curNorm; this.mode = toClone.mode; this.R = toClone.R; this.eta = toClone.eta; this.setKernel(toClone.k.clone()); if(toClone.accelCache != null) this.accelCache = new DoubleList(toClone.accelCache); this.gamma = toClone.gamma; this.rand = RandomUtil.getRandom(); this.setEpochs(toClone.getEpochs()); } /** * Sets the learning rate to use for the algorithm. Unlike many other * stochastic algorithms, the learning rate for CSKLR should be large, often * in the range of (0.5, 1) - and can even be larger than 1 at times. If the * learning rate is too low, it may be difficult to get strong confidence * results from the algorithm. * * @param eta the positive learning rate to use */ public void setEta(double eta) { if(eta < 0 || Double.isNaN(eta) || Double.isInfinite(eta)) throw new IllegalArgumentException("The learning rate should be in (0, Inf), not " + eta); this.eta = eta; } /** * Returns the learning rate to use * @return the learning rate to use */ public double getEta() { return eta; } /** * Sets the maximal margin norm value for the algorithm. When the norm is * exceeded, the coefficients will be rescaled to fit in the norm. If the * maximal norm is too small (less than 5), it may be difficult to get * strong confidence results from the algorithm. <br> * A good range of values suggested by the original paper is 10<sup>x</sup> * &forall; x &isin; {0, 1, 2, 3, 4, 5} * @param R */ public void setR(double R) { if(R < 0 || Double.isNaN(R) || Double.isInfinite(R)) throw new IllegalArgumentException("The max norm should be in (0, Inf), not " + R); this.R = R; } /** * Returns the maximal norm of the algorithm * @return the maximal norm of the algorithm */ public double getR() { return R; } /** * Sets what update mode should be used. 
The update mode controls the * sparsity of the mode, and the behavior of {@link #setGamma(double) } * @param mode the update mode to use */ public void setMode(UpdateMode mode) { this.mode = mode; } /** * Returns the update mode in use * @return the update mode in use */ public UpdateMode getMode() { return mode; } /** * Sets the gamma value to use. This value, depending on which * {@link UpdateMode} is used, controls the sparsity of the model. * @param gamma the gamma parameter, which is at least always positive */ public void setGamma(double gamma) { if(gamma < 0 || Double.isNaN(gamma) || Double.isInfinite(gamma)) throw new IllegalArgumentException("Gamma must be in (0, Infity), not " + gamma); this.gamma = gamma; } /** * Returns the gamma sparsity parameter value * @return the gamma sparsity parameter value */ public double getGamma() { return gamma; } /** * Set which kernel trick to use * @param k the kernel to use */ public void setKernel(KernelTrick k) { this.k = k; } /** * Returns the kernel trick in use * @return the kernel trick in use */ public KernelTrick getKernel() { return k; } /** * Computes the margin score for the given data point * @param x the input vector * @return the margin score */ private double getPreScore(Vec x) { return k.evalSum(vecs, accelCache, alpha.getBackingArray(), x, 0, alpha.size()); } /** * Returns the binary logistic regression score * @param y the sign of the desired class (-1 or 1) * @param pre the raw coefficient score * @return the probability in [0, 1] that the score is of the desired class */ protected static double getScore(double y, double pre) { return 1/(1+Math.exp(-y*pre)); } @Override public CSKLR clone() { return new CSKLR(this); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(predicting.getNumOfCategories() != 2) throw new FailedToFitException("CSKLR supports only binary classification"); alpha = new DoubleList(); vecs = new ArrayList<Vec>(); curNorm = 0; rand = RandomUtil.getRandom(); if(k.supportsAcceleration()) accelCache = new DoubleList(); } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { double y_t = targetClass*2-1; Vec x_t = dataPoint.getNumericalValues(); double pre = getPreScore(x_t); double score = getScore(y_t, pre); switch(mode) { case NC: break; default: double pt = mode.pt(y_t, score, pre, eta, gamma); if(rand.nextDouble() > pt) return; break; } double alpha_i = -eta*y_t*mode.grad(y_t, score, pre, gamma)*weight; alpha.add(alpha_i); vecs.add(x_t); k.addToCache(x_t, accelCache); curNorm += Math.abs(alpha_i) * k.eval(vecs.size(), vecs.size(), vecs, accelCache); //projection step if (curNorm > R) { double coef = R/curNorm; for(int i = 0; i < alpha.size(); i++) alpha.set(i, alpha.get(i)*coef); curNorm = coef; } } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(2); double p_0 = getScore(-1, getPreScore(data.getNumericalValues())); cr.setProb(0, p_0); cr.setProb(1, 1-p_0); return cr; } @Override public boolean supportsWeightedData() { return true; } }
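/*
 * Hypothetical usage sketch (not part of the original JSAT file above): configures the
 * conservative kernel logistic regression learner with the AUXILIARY_1 update mode
 * documented above. The RBFKernel constructor and the externally supplied two-class
 * ClassificationDataSet are assumptions; train(...) is inherited from
 * BaseUpdateableClassifier, and the sketch is assumed to sit in the same package as CSKLR.
 */
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.distributions.kernels.RBFKernel;

class CSKLRUsageSketch
{
    static CategoricalResults fitAndPredict(ClassificationDataSet binaryData)
    {
        CSKLR csklr = new CSKLR(0.9, new RBFKernel(0.5), 100, CSKLR.UpdateMode.AUXILIARY_1);
        csklr.setGamma(11);      // 1 + 10^1; larger gamma gives a sparser model in the auxiliary modes
        csklr.train(binaryData); // for multiple epochs the documentation recommends CSKLRBatch instead
        return csklr.classify(binaryData.getDataPoint(0));
    }
}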
15365
32.116379
113
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/kernelized/CSKLRBatch.java
package jsat.classifiers.linear.kernelized; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Random; import jsat.DataSet; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.svm.SupportVectorLearner; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.distributions.kernels.KernelTrick; import jsat.exceptions.FailedToFitException; import jsat.linear.Vec; import jsat.parameters.Parameterized; import jsat.utils.IntList; import jsat.utils.ListUtils; import jsat.utils.random.RandomUtil; /** * An implementation of Conservative Stochastic Kernel Logistic Regression. This * is an online algorithm that obtains sparse solutions by conservatively * rejecting updates based on a binomial distribution of the error on each * update. <br><br> * This algorithm works best on data sets with a very high number of samples * where a high accuracy is obtainable using a kernel model. It is often the * case that this model produces accurate results, but has a low confidence due * to the conservative updating. This can be counteracted by having a very large * number of features, but that often increases the size of the model. * <br> * This batch version can also be used to more efficiently learn dense KLR * models using the stochastic method with the {@link CSKLR.UpdateMode#NC} mode if model * sparsity is not important. * <br><br> * It is important to read the documentation and test some different values for * the {@link #setEta(double) learning rate} and {@link #setGamma(double) gamma} * variables. They behave different compared to many algorithms. * <br><br> * See paper: <br> * Zhang, L., Jin, R., Chen, C., Bu, J.,&amp;He, X. (2012). <i>Efficient Online * Learning for Large-Scale Sparse Kernel Logistic Regression</i>. Twenty-Sixth * AAAI Conference on Artificial Intelligence (pp. 1219–1225). 
Retrieved from * <a href="http://www.aaai.org/ocs/index.php/AAAI/AAAI12/paper/viewPDFInterstitial/5003/5544">here</a> * * @author Edward Raff */ public class CSKLRBatch extends SupportVectorLearner implements Parameterized, Classifier { private static final long serialVersionUID = -2305532659182911285L; private double eta; private double curNorm; private double R = 10; private int T = 0; private CSKLR.UpdateMode mode; protected double gamma = 2; private int epochs = 10; /** * Creates a new SCKLR Batch learning object * @param eta the learning rate to use * @param kernel the kernel to use * @param R the maximal norm of the surface * @param mode the mode to use * @param cacheMode the kernel caching mode to use */ public CSKLRBatch(double eta, KernelTrick kernel, double R, CSKLR.UpdateMode mode, CacheMode cacheMode) { super(kernel, cacheMode); setEta(eta); setR(R); setMode(mode); } /** * Copy constructor * @param toClone the object to copy */ protected CSKLRBatch(CSKLRBatch toClone) { super(toClone); this.curNorm = toClone.curNorm; this.epochs = toClone.epochs; this.eta = toClone.eta; this.R = toClone.R; this.T = toClone.T; this.mode = toClone.mode; this.gamma = toClone.gamma; } @Override public CSKLRBatch clone() { return new CSKLRBatch(this); } /** * Sets the number of training epochs (passes) through the data set * @param epochs the number of passes through the data set */ public void setEpochs(int epochs) { this.epochs = epochs; } /** * Returns the number of passes through the data set * @return the number of passes through the data set */ public int getEpochs() { return epochs; } /** * Sets the learning rate to use for the algorithm. Unlike many other * stochastic algorithms, the learning rate for CSKLR should be large, often * in the range of (0.5, 1) - and can even be larger than 1 at times. If the * learning rate is too low, it may be difficult to get strong confidence * results from the algorithm. * * @param eta the positive learning rate to use */ public void setEta(double eta) { if(eta < 0 || Double.isNaN(eta) || Double.isInfinite(eta)) throw new IllegalArgumentException("The learning rate should be in (0, Inf), not " + eta); this.eta = eta; } /** * Returns the learning rate to use * @return the learning rate to use */ public double getEta() { return eta; } /** * Sets the maximal margin norm value for the algorithm. When the norm is * exceeded, the coefficients will be rescaled to fit in the norm. If the * maximal norm is too small (less than 5), it may be difficult to get * strong confidence results from the algorithm. <br> * A good range of values suggested by the original paper is 10<sup>x</sup> * &forall; x &isin; {0, 1, 2, 3, 4, 5} * @param R */ public void setR(double R) { if(R < 0 || Double.isNaN(R) || Double.isInfinite(R)) throw new IllegalArgumentException("The max norm should be in (0, Inf), not " + R); this.R = R; } /** * Returns the maximal norm of the algorithm * @return the maximal norm of the algorithm */ public double getR() { return R; } /** * Sets what update mode should be used. The update mode controls the * sparsity of the mode, and the behavior of {@link #setGamma(double) } * @param mode the update mode to use */ public void setMode(CSKLR.UpdateMode mode) { this.mode = mode; } /** * Returns the update mode in use * @return the update mode in use */ public CSKLR.UpdateMode getMode() { return mode; } /** * Sets the gamma value to use. This value, depending on which * {@link CSKLR.UpdateMode} is used, controls the sparsity of the model. 
* @param gamma the gamma parameter, which is at least always positive */ public void setGamma(double gamma) { if(gamma < 0 || Double.isNaN(gamma) || Double.isInfinite(gamma)) throw new IllegalArgumentException("Gamma must be in (0, Infity), not " + gamma); this.gamma = gamma; } /** * Returns the gamma sparsity parameter value * @return the gamma sparsity parameter value */ public double getGamma() { return gamma; } /** * Guesses the distribution to use for the R parameter * * @param d the dataset to get the guess for * @return the guess for the R parameter * @see #setR(double) */ public static Distribution guessR(DataSet d) { return new LogUniform(1, 1e5); } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(2); double p_0 = CSKLR.getScore(-1, getPreScore(data.getNumericalValues())); cr.setProb(0, p_0); cr.setProb(1, 1-p_0); return cr; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { if(dataSet.getClassSize() != 2) throw new FailedToFitException("CSKLR supports only binary classification"); //First we need to set up the vectors array final int N = dataSet.size(); vecs = new ArrayList<Vec>(N); alphas = new double[N]; for(int i = 0; i < N; i++) vecs.add(dataSet.getDataPoint(i).getNumericalValues()); curNorm = 0; T = 0; Random rand = RandomUtil.getRandom(); IntList sampleOrder = new IntList(N); ListUtils.addRange(sampleOrder, 0, N, 1); setCacheMode(getCacheMode());//Initiates the cahce for(int epoch = 0; epoch < epochs; epoch++) { Collections.shuffle(sampleOrder); for(int i : sampleOrder) { final double weight = dataSet.getWeight(i); final double y_t = dataSet.getDataPointCategory(i)*2-1; final Vec x_t = vecs.get(i); final double pre = getPreScore(x_t); final double score = CSKLR.getScore(y_t, pre); switch(mode) { case NC: break; default: double pt = mode.pt(y_t, score, pre, eta, gamma); if(rand.nextDouble() > pt) continue; break; } double alpha_i = -eta*y_t*mode.grad(y_t, score, pre, gamma)*weight; alphas[i] += alpha_i; curNorm += Math.abs(alpha_i)*kEval(i, i); //projection step if(curNorm > R) { double coef = R/curNorm; for(int j = 0; j < alphas.length; j++) alphas[j] *= coef; curNorm = coef; } } } int supportVectorCount = 0; for(int i = 0; i < N; i++) if(alphas[i] > 0 || alphas[i] < 0)//Its a support vector { ListUtils.swap(vecs, supportVectorCount, i); alphas[supportVectorCount++] = alphas[i]; } vecs = new ArrayList<Vec>(vecs.subList(0, supportVectorCount)); alphas = Arrays.copyOfRange(alphas, 0, supportVectorCount); setCacheMode(null); setAlphas(alphas); } private double getPreScore(Vec x) { return kEvalSum(x); } @Override public boolean supportsWeightedData() { return true; } }
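/*
 * Hypothetical usage sketch (not part of the original JSAT file above): runs the batch
 * variant for several epochs with UpdateMode.NC, which updates on every sample and so
 * learns a dense kernel logistic regression model. The RBFKernel constructor, the
 * CacheMode.FULL constant, and the externally supplied two-class ClassificationDataSet
 * are assumptions made for illustration; the sketch is assumed to sit in the same
 * package as CSKLRBatch.
 */
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.svm.SupportVectorLearner;
import jsat.distributions.kernels.RBFKernel;

class CSKLRBatchUsageSketch
{
    static CategoricalResults fitAndPredict(ClassificationDataSet binaryData)
    {
        CSKLRBatch model = new CSKLRBatch(0.9, new RBFKernel(0.5), 100,
                CSKLR.UpdateMode.NC, SupportVectorLearner.CacheMode.FULL);
        model.setEpochs(10); // number of shuffled passes over the training data
        model.train(binaryData);
        return model.classify(binaryData.getDataPoint(0));
    }
}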
10307
30.141994
107
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/kernelized/DUOL.java
package jsat.classifiers.linear.kernelized; import static java.lang.Math.*; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import jsat.DataSet; import jsat.classifiers.BaseUpdateableClassifier; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.DataPoint; import jsat.classifiers.linear.PassiveAggressive; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.distributions.kernels.KernelTrick; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameter.ParameterHolder; import jsat.parameters.Parameterized; import jsat.utils.DoubleList; /** * Provides an implementation of Double Update Online Learning (DUOL) algorithm. * It is a kernelized extension of {@link PassiveAggressive} (PA-I) where one * previously learned support vector may be updated upon each addition to the * support vector set. The SV set is unbounded in size. The objective function * is not identical because of the dual updates. * <br><br> * Using a larger {@link #setC(double) C} value for DUOL has theoretical * improvements, as it increases the number of "strong" dual updates. The * default value is set to 10 as suggested in the paper. * See:<br> * <ul> * <li> * Zhao, P., Hoi, S. C. H.,&amp;Jin, R. (2011). <i>Double Updating Online Learning</i>. * Journal of Machine Learning Research, 12, 1587–1615. Retrieved from * <a href="http://www.cse.msu.edu/~rongjin/publications/zhao11a.pdf"> here</a> * </li> * <li> * Zhao, P., Hoi, S. C. H.,&amp;Jin, R. (2009). <i>DUOL: A Double Updating * Approach for Online Learning</i>. In Y. Bengio, D. Schuurmans, J. Lafferty, * C. K. I. Williams,&amp;A. Culotta (Eds.), * Advances in Neural Information Processing Systems 22 (pp. 2259–2267). * </li> * </ul> * @author Edward Raff */ public class DUOL extends BaseUpdateableClassifier implements BinaryScoreClassifier, Parameterized { private static final long serialVersionUID = -4751569462573287056L; /** * Kernel trick to use */ @ParameterHolder protected KernelTrick k; /** * Set of support vectors */ protected List<Vec> S; /** * Cached outputs of the current decision function on each support vector */ protected List<Double> f_s; /** * Signed weights for each support vector. Original paper uses the notation * of gamma in the original paper, but this is weird and easily confused * with the sign values y_i. 
The sign class values can be obtained from the * signed alphas using {@link Math#signum(double) } */ protected List<Double> alphas; protected List<Double> accelCache; /** * Stores the values of k(x_i, y) for reuse when observing a new example */ protected DoubleList kTmp; protected double rho = 0; protected double C = 10; /** * Creates a new DUOL learner * @param k the kernel to use */ public DUOL(KernelTrick k) { this.k = k; this.S = new ArrayList<Vec>(); this.f_s = new DoubleList(); this.alphas = new DoubleList(); } /** * Copy constructor * @param other the object to copy */ protected DUOL(DUOL other) { this.k = other.k.clone(); if(other.S != null) { this.S = new ArrayList<Vec>(other.S.size()); for(Vec v : other.S) this.S.add(v.clone()); this.f_s = new DoubleList(other.f_s); this.alphas = new DoubleList(other.alphas); if(other.accelCache != null) this.accelCache = new DoubleList(other.accelCache); if(other.kTmp != null) this.kTmp = new DoubleList(other.kTmp); } this.rho = other.rho; this.C = other.C; } @Override public DUOL clone() { return new DUOL(this); } /** * Sets the aggressiveness parameter. Increasing the value of this parameter * increases the aggressiveness of the algorithm. It must be a positive * value. This parameter essentially performs a type of regularization on * the updates * @param C the aggressiveness parameter in (0, Inf) */ public void setC(double C) { if(Double.isNaN(C) || C <= 0 || Double.isInfinite(C)) throw new IllegalArgumentException("C parameter must be in range (0, inf) not " + C); this.C = C; } /** * Returns the aggressiveness parameter * @return the aggressiveness parameter */ public double getC() { return C; } /** * Sets the "conflict" parameter, which controls how often double updates * are performed. Smaller (near zero) values tend to produce more double * updates, with values near 1 producing few double updates. 
The value must * be in the range [0, 1] * @param rho the conflict parameter for when to update a second support vector */ public void setRho(double rho) { this.rho = rho; } /** * Returns the "conflict" parameter value for the threshold of performing double updates * @return the "conflict" parameter value for the threshold of performing double updates */ public double getRho() { return rho; } /** * Sets the kernel trick to use * @param k the kernel trick to use */ public void setKernel(KernelTrick k) { this.k = k; } /** * Returns the kernel trick in use * @return the kernel trick in use */ public KernelTrick getKernel() { return k; } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(numericAttributes <= 0) throw new FailedToFitException("DUOL requires numeric features"); else if(predicting.getNumOfCategories() != 2) throw new FailedToFitException("DUOL supports only binnary classification"); this.S = new ArrayList<Vec>(); this.f_s = new DoubleList(); this.alphas = new DoubleList(); this.accelCache = new DoubleList(); this.kTmp = new DoubleList(); } @Override public synchronized void update(DataPoint dataPoint, double weight, int targetClass) { final Vec x_t = dataPoint.getNumericalValues(); final double y_t = targetClass*2-1; final List<Double> qi = k.getQueryInfo(x_t); double score = score(x_t, qi, true); final double loss_t = max(0, 1-y_t*score); if(loss_t <= 0) return; //start of line 8: int b = -1; double w_min = Double.POSITIVE_INFINITY; for(int i = 0; i < S.size(); i++) { if(f_s.get(i) <= 1) { double tmp = signum(alphas.get(i))*y_t*kTmp.get(i); if(tmp <= w_min) { w_min = tmp; b = i; } } } final double k_t = k.eval(0, 0, Arrays.asList(x_t), qi); if(w_min <= -rho) { final double k_b = k.eval(b, b, S, accelCache); final double k_tb = kTmp.get(b); final double alpha_b = alphas.get(b); final double w_tb = y_t*signum(alpha_b)*k_tb; final double gamma_hat_b = abs(alpha_b); final double loss_b = 1-signum(alpha_b)*f_s.get(b); //(C-gamma_hat_b) is common expression bellow, so use this insted final double CmGhb = (C-gamma_hat_b); final double gamma_t; final double gamma_b; final double gamma_b_delta; if(k_t*C +w_tb*CmGhb-loss_t < 0 && k_b*CmGhb+w_tb*C-loss_b < 0) { gamma_t = C; gamma_b_delta = CmGhb; } else if( (w_tb*w_tb*C-w_tb*loss_b-k_t*k_b*C+k_b*loss_t)/k_b > 0 && isIn((loss_b-w_tb*C)/k_b, -gamma_hat_b, CmGhb) ) { gamma_t = C; gamma_b_delta = (loss_b-w_tb*C)/k_b; } else if( isIn((loss_t-w_tb*CmGhb)/k_t, 0, C) && loss_b-k_b*CmGhb-w_tb*(loss_t-w_tb*CmGhb)/k_t > 0 ) { gamma_t = (loss_t-w_tb*CmGhb/k_t); gamma_b_delta = CmGhb; } else//last case is the only option by elimination, no need to write complicated if statment for it { final double denom = k_t*k_b-w_tb*w_tb; gamma_t = (k_b*loss_t-w_tb*loss_b)/denom; gamma_b_delta = (k_t*loss_b-w_tb*loss_t)/denom; } gamma_b = gamma_hat_b+gamma_b_delta; //add new SV S.add(x_t); accelCache.addAll(qi); kTmp.add(k_t); alphas.add(y_t*gamma_t); //dont forget curretn SV self value which gets updated in the loop f_s.add(score); for(int i = 0; i < S.size(); i++) { final double y_i = signum(alphas.get(i)); f_s.set(i, f_s.get(i)+y_i*gamma_t*y_t*kTmp.get(i)+y_i*gamma_b_delta*signum(alpha_b)*k.eval(i, b, S, accelCache)); } //update old weight for b alphas.set(b, signum(alpha_b)*gamma_b); } else /* no auxiliary example found */ { final double gamma_t = min(C, loss_t/k_t); //add new SV S.add(x_t); accelCache.addAll(qi); kTmp.add(k_t); alphas.add(y_t*gamma_t); //dont forget curretn SV self value 
which gets updated in the loop f_s.add(score); for(int i = 0; i < S.size(); i++) { final double y_i = signum(alphas.get(i)); f_s.set(i, f_s.get(i)+y_i*gamma_t*y_t*kTmp.get(i)); } } } private boolean isIn(double x, double a, double b) { return a <= x && x <= b; } private double score(Vec x, List<Double> qi, boolean store) { if(store) kTmp.clear(); double score = 0; for(int i = 0; i < S.size(); i++) { double tmp = k.eval(i, x, qi, S, accelCache); if(store) kTmp.add(tmp); score += alphas.get(i)*tmp; } return score; } private double score(Vec x, List<Double> qi) { return score(x, qi, false); } @Override public CategoricalResults classify(DataPoint data) { if(alphas == null) throw new UntrainedModelException("Model has not yet been trained"); CategoricalResults cr = new CategoricalResults(2); double score = getScore(data); if(score < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { Vec x = dp.getNumericalValues(); return score(x, k.getQueryInfo(x)); } @Override public boolean supportsWeightedData() { return false; } /** * Guesses the distribution to use for the C parameter * * @param d the dataset to get the guess for * @return the guess for the C parameter * @see #setC(double) */ public static Distribution guessC(DataSet d) { return new LogUniform(1e-4, 1e5); } }
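As a point of reference for the update logic above: when no stored support vector has enough "conflict" (w_min > -rho), DUOL falls back to the PA-I style single update, giving the new support vector the signed weight y_t * min(C, loss / k(x_t, x_t)). The standalone sketch below illustrates just that fallback step, using a plain dot product in place of a KernelTrick; the class and variable names are hypothetical and not part of JSAT.

public class DuolSingleUpdateSketch
{
    /** plain dot product used here purely as a stand-in kernel */
    static double k(double[] a, double[] b)
    {
        double s = 0;
        for (int i = 0; i < a.length; i++)
            s += a[i] * b[i];
        return s;
    }

    public static void main(String[] args)
    {
        double C = 10;                     // aggressiveness parameter, as in setC(10)
        double[] x_t = {1.0, -2.0};        // incoming example
        double y_t = +1;                   // label mapped to {-1, +1}
        double score = -0.3;               // current decision value f(x_t)

        double loss = Math.max(0, 1 - y_t * score);
        if (loss > 0)
        {
            double gamma_t = Math.min(C, loss / k(x_t, x_t));
            double alpha_t = y_t * gamma_t;   // signed weight stored for the new support vector
            System.out.println("alpha_t = " + alpha_t);
        }
    }
}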
11,867
30.648
129
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/kernelized/Forgetron.java
package jsat.classifiers.linear.kernelized; import static java.lang.Math.*; import java.util.Arrays; import java.util.List; import jsat.classifiers.BaseUpdateableClassifier; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.DataPoint; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.classifiers.neuralnetwork.Perceptron; import jsat.distributions.kernels.KernelTrick; import jsat.exceptions.FailedToFitException; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameter.ParameterHolder; import jsat.parameters.Parameterized; /** * Implementation of the first two Forgetron algorithms. The Forgetron is a * kernelized version of the {@link Perceptron} that maintains a fixed sized * buffer of data instances that it uses to form its decision boundary. * <br><br> * See:<br> * Dekel, O., Shalev-Shwartz, S.,&amp;Singer, Y. (2008). <i>The Forgetron: A * kernel-based perceptron on a fixed budget</i>. SIAM Journal on Computing, * 37(5), 1342–1372. * * @author Edward Raff */ public class Forgetron extends BaseUpdateableClassifier implements BinaryScoreClassifier, Parameterized { private static final long serialVersionUID = -2631315082407427077L; @ParameterHolder private KernelTrick K; private Vec[] I; /** * Stores the label times the weight. Getting the true weight is an abs * operation. Getting the true label is a signum operation. */ private double[] s; private int size; /** * Will always point to current insert position. Either empty, or the last * value ever inserted */ private int curPos; private int budget; private double U; private double Bconst; private double Q, M; private boolean selfTuned = true; /** * Creates a new Forgetron * @param kernel the kernel function to use * @param budget the maximum number of data points to use */ public Forgetron(KernelTrick kernel, int budget) { this.K = kernel; setBudget(budget); } /** * Sets whether or not the self-tuned variant of the Forgetron is used, the * default is {@code true} * * @param selfTurned {@code true} to use the self-tuned variance, * {@code false} otherwise. */ public void setSelfTurned(boolean selfTurned) { this.selfTuned = selfTurned; } /** * * @return {@code true} if the self-tuned variant is used, {@code false} * otherwise. */ public boolean isSelfTuned() { return selfTuned; } /** * Copy constructor * @param toClone the forgetron to clone */ protected Forgetron(Forgetron toClone) { super(toClone); this.K = toClone.K.clone(); this.budget = toClone.budget; this.U = toClone.U; this.Bconst = toClone.Bconst; this.Q = toClone.Q; this.M = toClone.M; this.curPos = toClone.curPos; this.size = toClone.size; if(toClone.I != null) { this.I = new Vec[toClone.I.length]; for(int i = 0; i < toClone.I.length; i++) if(toClone.I[i] != null) this.I[i] = toClone.I[i].clone(); } if(toClone.s != null) this.s = Arrays.copyOf(toClone.s, toClone.s.length); } /** * Sets the new budget, which is the maximum number of data points the * Forgetron can use to form its decision boundary. 
* @param budget the maximum number of data points to use */ public void setBudget(int budget) { this.budget = budget; double B = budget; U = sqrt((B+1)/log(B+1))/4; Bconst = pow(B+1, 1.0/(2*B+2)); } /** * Returns the current budget * @return the current budget */ public int getBudget() { return budget; } /** * Sets the kernel trick to use * @param K the kernel trick to use */ public void setKernelTrick(KernelTrick K) { this.K = K; } /** * Returns the current kernel trick * @return the current kernel trick */ public KernelTrick getKernelTrick() { return K; } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(2); int winner = (int) ((signum(getScore(data))+1)/2); cr.setProb(winner, 1); return cr; } @Override public double getScore(DataPoint dp) { return classify(dp.getNumericalValues()); } private double classify(Vec x) { double r = 0; for(int i = 0; i < size; i++) r += s[i]*K.eval(I[i], x); return r; } @Override public boolean supportsWeightedData() { return false; } @Override public Forgetron clone() { return new Forgetron(this); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(predicting.getNumOfCategories() != 2) throw new FailedToFitException("Forgetron only supports binary classification"); else if(numericAttributes == 0) throw new FailedToFitException("Forgetron requires numeric attributes"); I = new Vec[budget]; s = new double[budget]; Q = M = 0; size = 0; curPos = 0; } /** * See equation 15 * @param lambda * @param mu * @return the update for equation 15 */ private double psi(double lambda, double mu) { return lambda*lambda+2*lambda-2*lambda*mu; } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { Vec x = dataPoint.getNumericalValues(); double f_t = classify(x); double y_t = targetClass*2-1; if(y_t*f_t > 0) { //its all cool bro } else//not cool bro (error) { M++; if (selfTuned) { if (size + 1 <= budget)//in budget, we can add safly { size++; I[curPos] = x; s[curPos] = y_t; } else//over budget, remove oldest { final int r = curPos; //f'_t equation (27) final double fp_t = classify(I[r]) + y_t * K.eval(x, I[r]); //equations (44) final double s_r = abs(s[r]); final double y_r = signum(s[r]); final double a = s_r * s_r - 2 * y_r * s_r * fp_t; final double b = 2 * s_r; final double c = Q - (15.0 / 32.0) * M; final double d = b * b - 4 * a * c; //equations (43) double phi_t; if ((a > 0 || (a < 0 && d > 0 && (-b - sqrt(d)) / (2 * a) > 1))) phi_t = min(1, (-b + sqrt(d)) / (2 * a)); else if (abs(a) <= 1e-13) phi_t = min(1, -c / b); else phi_t = 1; double fpp_t_r = phi_t * fp_t; Q += psi(phi_t * s_r, y_r * fpp_t_r); I[curPos] = x; s[curPos] = y_t; if (phi_t != 1) for (int i = 0; i < s.length; i++) s[i] *= phi_t; } } else//normal version { double ff = 1;//for the added term that makes us remove one. if(size > 0) { for (int i = 0; i < size; i++) ff += pow(s[i], 2) * K.eval(I[i], I[i]); } double fNorm = sqrt(ff);//obtained from after equation 2 double phi = min(Bconst, U/fNorm); I[curPos] = x; s[curPos] = y_t; if(size < budget) size++; for(int i = 0; i < size; i++) s[i] *= phi; } curPos = (curPos + 1) % I.length; } } }
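The non-self-tuned branch of update() above can be summarized in a few lines: every mistake overwrites the current slot of the fixed-size buffer and shrinks all stored weights by phi = min(Bconst, U / ||f||). The sketch below mirrors that step with plain arrays and a dot-product kernel; like the original code it starts the norm accumulator at 1 for the new point (i.e., it assumes k(x, x) = 1, as with an RBF kernel), and all names are illustrative rather than part of the JSAT API.

public class ForgetronShrinkSketch
{
    static double dot(double[] a, double[] b)
    {
        double r = 0;
        for (int i = 0; i < a.length; i++)
            r += a[i] * b[i];
        return r;
    }

    public static void main(String[] args)
    {
        int budget = 3;
        double[][] I = new double[budget][];   // ring buffer of stored examples
        double[] s = new double[budget];       // label * weight per slot
        int size = 0, curPos = 0;

        double B = budget;
        double U = Math.sqrt((B + 1) / Math.log(B + 1)) / 4;
        double Bconst = Math.pow(B + 1, 1.0 / (2 * B + 2));

        double[] x = {0.5, 1.5};               // misclassified example
        double y = -1;

        double ff = 1;                          // new point's own term, assuming k(x, x) = 1
        for (int i = 0; i < size; i++)
            ff += s[i] * s[i] * dot(I[i], I[i]);
        double phi = Math.min(Bconst, U / Math.sqrt(ff));

        I[curPos] = x;                          // overwrite the current (oldest) slot
        s[curPos] = y;
        if (size < budget)
            size++;
        for (int i = 0; i < size; i++)
            s[i] *= phi;                        // shrink everything, new point included
        curPos = (curPos + 1) % budget;

        System.out.println("phi = " + phi);
    }
}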
8,502
27.343333
113
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/kernelized/KernelSGD.java
package jsat.classifiers.linear.kernelized; import java.util.List; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.classifiers.BaseUpdateableClassifier; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.DataPoint; import jsat.classifiers.UpdateableClassifier; import jsat.classifiers.linear.LinearSGD; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.distributions.kernels.KernelPoint; import jsat.distributions.kernels.KernelPoints; import jsat.distributions.kernels.KernelTrick; import jsat.distributions.kernels.RBFKernel; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.lossfunctions.LossC; import jsat.lossfunctions.LossFunc; import jsat.lossfunctions.LossMC; import jsat.lossfunctions.LossR; import jsat.lossfunctions.SoftmaxLoss; import jsat.parameters.Parameter; import jsat.parameters.Parameter.ParameterHolder; import jsat.parameters.Parameterized; import jsat.regression.BaseUpdateableRegressor; import jsat.regression.RegressionDataSet; import jsat.regression.UpdateableRegressor; /** * Kernel SGD is the kernelized counterpart to {@link LinearSGD}, and learns * nonlinear functions via the kernel trick. The implementation is built upon * {@link KernelPoint} and {@link KernelPoints} to support budgeted learning. * Following the LinearSGD implementation, whether or not this algorithm * supports regression, binary-classification, or multi-class classification is * controlled by the {@link #setLoss(jsat.lossfunctions.LossFunc) loss function} * used. * <br> * <br> * The learning rate decay is not configurable for this implementation, and is * decayed at a rate of {@link #setEta(double) &eta;} / * ({@link #setLambda(double) &lambda;} * (t + 2 / &lambda;)) , where {@code t} * is the time step. 
* * @author Edward Raff */ public class KernelSGD implements UpdateableClassifier, UpdateableRegressor, Parameterized { private static final long serialVersionUID = -4956596506787859023L; private LossFunc loss; @ParameterHolder private KernelTrick kernel; private double lambda; private double eta; private KernelPoint.BudgetStrategy budgetStrategy; private int budgetSize; private double errorTolerance; private int time; private KernelPoint kpoint; private KernelPoints kpoints; private int epochs = 1; /** * Creates a new Kernel SGD object for classification with the RBF kernel */ public KernelSGD() { this(new SoftmaxLoss(), new RBFKernel(), 1e-4, KernelPoint.BudgetStrategy.MERGE_RBF, 300); } /** * Creates a new Kernel SGD object * @param loss the loss function to use * @param kernel the kernel trick to use * @param lambda the regularization penalty * @param budgetStrategy the budget maintenance strategy to use * @param budgetSize the maximum support vector budget */ public KernelSGD(LossFunc loss, KernelTrick kernel, double lambda, KernelPoint.BudgetStrategy budgetStrategy, int budgetSize) { this(loss, kernel, lambda, budgetStrategy, budgetSize, 1.0, 0.05); } /** * Creates a new Kernel SGD object * @param loss the loss function to use * @param kernel the kernel trick to use * @param lambda the regularization penalty * @param eta the initial learning rate * @param budgetStrategy the budget maintenance strategy to use * @param errorTolerance the error tolerance used in certain budget maintenance steps * @param budgetSize the maximum support vector budget */ public KernelSGD(LossFunc loss, KernelTrick kernel, double lambda, KernelPoint.BudgetStrategy budgetStrategy, int budgetSize, double eta, double errorTolerance) { setLoss(loss); setKernel(kernel); setLambda(lambda); setEta(eta); setBudgetStrategy(budgetStrategy); setErrorTolerance(errorTolerance); setBudgetSize(budgetSize); } /** * Copy constructor * @param toCopy the object to copy */ public KernelSGD(KernelSGD toCopy) { this.loss = toCopy.loss.clone(); this.kernel = toCopy.kernel.clone(); this.lambda = toCopy.lambda; this.eta = toCopy.eta; this.budgetStrategy = toCopy.budgetStrategy; this.budgetSize = toCopy.budgetSize; this.errorTolerance = toCopy.errorTolerance; this.time = toCopy.time; this.epochs = toCopy.epochs; if(toCopy.kpoint != null) this.kpoint = toCopy.kpoint.clone(); if(toCopy.kpoints != null) this.kpoints = toCopy.kpoints.clone(); } /** * Sets the number of iterations of the training set done during batch * training * @param epochs the number of iterations in batch training */ public void setEpochs(int epochs) { if(epochs < 1) throw new IllegalArgumentException("Epochs must be a poistive constant, not " + epochs); this.epochs = epochs; } /** * Returns the number of epochs to use * @return the number of epochs to use */ public int getEpochs() { return epochs; } /** * Sets the loss function to use. The loss function controls whether or not * classification or regression is supported. * @param loss */ public void setLoss(LossFunc loss) { if(loss == null) throw new NullPointerException("Loss may not be null"); this.loss = loss; } /** * Returns the loss function in use * @return the loss function in use */ public LossFunc getLoss() { return loss; } /** * Sets the L<sub>2</sub> regularization parameter used during learning. 
* @param lambda the positive regularization parameter */ public void setLambda(double lambda) { if(lambda <= 0 || Double.isNaN(lambda) || Double.isInfinite(lambda)) throw new IllegalArgumentException("lambda must be a positive constant, not " + lambda); this.lambda = lambda; } /** * Returns the L<sub>2</sub> regularization parameter * @return the L<sub>2</sub> regularization parameter */ public double getLambda() { return lambda; } /** * Sets the error tolerance used for certain * {@link #setBudgetStrategy(jsat.distributions.kernels.KernelPoint.BudgetStrategy) budget strategies} * @param errorTolerance the error tolerance in [0, 1] */ public void setErrorTolerance(double errorTolerance) { if(errorTolerance < 0 || errorTolerance > 1 || Double.isNaN(errorTolerance)) throw new IllegalArgumentException("Error tolerance must be in [0, 1], not " + errorTolerance); this.errorTolerance = errorTolerance; } /** * Returns the error tolerance that would be used * @return the error tolerance that would be used */ public double getErrorTolerance() { return errorTolerance; } /** * Sets the maximum budget size, or number of support vectors, to allow * during training. Increasing the budget size will increase the accuracy of * the model, but will also increase the computational cost * @param budgetSize the maximum allowed number of support vectors */ public void setBudgetSize(int budgetSize) { if(budgetSize < 1) throw new IllegalArgumentException("Budgest size must be a positive constant, not " + budgetSize); this.budgetSize = budgetSize; } /** * Returns the budget size, or maximum number of allowed support vectors. * @return the maximum number of allowed support vectors */ public int getBudgetSize() { return budgetSize; } /** * Sets the budget maintenance strategy. * @param budgetStrategy the method to meet budget size requirements */ public void setBudgetStrategy(KernelPoint.BudgetStrategy budgetStrategy) { if(budgetStrategy == null) throw new NullPointerException("Budget strategy must be non null"); this.budgetStrategy = budgetStrategy; } /** * Returns the method of budget maintenance * @return the method of budget maintenance */ public KernelPoint.BudgetStrategy getBudgetStrategy() { return budgetStrategy; } /** * Sets the base learning rate to start from. Because of the decay rate in * use a good value for &eta; is 1.0. 
* @param eta the starting learning rate to use */ public void setEta(double eta) { this.eta = eta; } /** * Returns the base learning rate * @return the base learning rate */ public double getEta() { return eta; } /** * Sets the kernel to use * @param kernel the kernel to use */ public void setKernel(KernelTrick kernel) { if(kernel == null) throw new NullPointerException("kernel trick must be non null"); this.kernel = kernel; } /** * Returns the kernel in use * @return the kernel in use */ public KernelTrick getKernel() { return kernel; } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if(!(loss instanceof LossC)) throw new FailedToFitException("Loss in use (" + loss.getClass().getSimpleName() + ") does not support classification"); if(predicting.getNumOfCategories() == 2) { kpoint = new KernelPoint(kernel, errorTolerance); kpoint.setBudgetStrategy(budgetStrategy); kpoint.setErrorTolerance(errorTolerance); kpoint.setMaxBudget(budgetSize); kpoints = null; } else { if(!(loss instanceof LossMC)) throw new FailedToFitException("Loss in use (" + loss.getClass().getSimpleName() + ") does not support multi-class classification"); kpoint = null; kpoints = new KernelPoints(kernel, predicting.getNumOfCategories(), errorTolerance); kpoints.setBudgetStrategy(budgetStrategy); kpoints.setErrorTolerance(errorTolerance); kpoints.setMaxBudget(budgetSize); } time = 0; } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes) { if(!(loss instanceof LossR)) throw new FailedToFitException("Loss in use (" + loss.getClass().getSimpleName() + ") does not support regession"); kpoint = new KernelPoint(kernel, errorTolerance); kpoint.setBudgetStrategy(budgetStrategy); kpoint.setErrorTolerance(errorTolerance); kpoint.setMaxBudget(budgetSize); kpoints = null; time = 0; } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { final Vec x = dataPoint.getNumericalValues(); final List<Double> qi = kernel.getQueryInfo(x); final double eta_t = getNextEta(); if(kpoint != null) { kpoint.mutableMultiply(1-eta_t*lambda); final double y = targetClass*2-1; double dot = kpoint.dot(x, qi); double lossD = ((LossC)loss).getDeriv(dot, y); if(lossD != 0) kpoint.mutableAdd(-eta_t*lossD, x, qi); } else if(kpoints != null) { kpoints.mutableMultiply(1-eta_t*lambda); Vec pred = new DenseVector(kpoints.dot(x, qi)); ((LossMC)loss).process(pred, pred); ((LossMC)loss).deriv(pred, pred, targetClass); pred.mutableMultiply(-eta_t);//should we wrap in a scaledVec? 
Probably fine unless someone pulls out a 200 class problem kpoints.mutableAdd(x, pred, qi); } } @Override public void update(DataPoint dataPoint, double weight, double targetValue) { final Vec x = dataPoint.getNumericalValues(); final List<Double> qi = kernel.getQueryInfo(x); final double eta_t = getNextEta(); kpoint.mutableMultiply(1 - eta_t * lambda); final double y = targetValue; double dot = kpoint.dot(x, qi); double lossD = ((LossR) loss).getDeriv(dot, y); if (lossD != 0) kpoint.mutableAdd(-eta_t * lossD, x, qi); } @Override public CategoricalResults classify(DataPoint data) { final Vec x = data.getNumericalValues(); final List<Double> qi = kernel.getQueryInfo(x); if(kpoint != null) return ((LossC)loss).getClassification(kpoint.dot(x, qi)); else { Vec pred = new DenseVector(kpoints.dot(x, qi)); ((LossMC)loss).process(pred, pred); return ((LossMC)loss).getClassification(pred); } } @Override public double regress(DataPoint data) { final Vec x = data.getNumericalValues(); final List<Double> qi = kernel.getQueryInfo(x); return ((LossR)loss).getRegression(kpoint.dot(x, qi)); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { BaseUpdateableClassifier.trainEpochs(dataSet, this, epochs); } @Override public boolean supportsWeightedData() { return false; } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { BaseUpdateableRegressor.trainEpochs(dataSet, this, epochs); } @Override public KernelSGD clone() { return new KernelSGD(this); } private double getNextEta() { return eta / (lambda * (++time + 2 / lambda)); } /** * Guess the distribution to use for the regularization term * {@link #setLambda(double) &lambda;} . * * @param d the data set to get the guess for * @return the guess for the &lambda; parameter */ public static Distribution guessLambda(DataSet d) { return new LogUniform(1e-7, 1e-2); } }
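The decay schedule documented above, eta_t = eta / (lambda * (t + 2 / lambda)), is easy to get a feel for numerically. The short sketch below (hypothetical class name, arbitrary parameter values) prints the first few step sizes and notes the two-part update each observation triggers.

public class KernelSgdEtaSketch
{
    public static void main(String[] args)
    {
        double eta = 1.0;       // base learning rate
        double lambda = 1e-4;   // L2 regularization strength
        int time = 0;

        for (int t = 1; t <= 3; t++)
        {
            double eta_t = eta / (lambda * (++time + 2 / lambda));
            // each update first shrinks the kernel model by (1 - eta_t * lambda),
            // then adds -eta_t * loss'(f(x), y) * x in the kernel feature space
            System.out.printf("step %d: eta_t = %.5f, shrink factor = %.7f%n",
                    t, eta_t, 1 - eta_t * lambda);
        }
    }
}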
14,636
30.958515
164
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/kernelized/OSKL.java
package jsat.classifiers.linear.kernelized; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; import jsat.DataSet; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.distributions.kernels.KernelTrick; import jsat.linear.Vec; import jsat.lossfunctions.HingeLoss; import jsat.lossfunctions.LogisticLoss; import jsat.lossfunctions.LossC; import jsat.parameters.Parameter; import jsat.parameters.Parameter.ParameterHolder; import jsat.parameters.Parameterized; import jsat.utils.DoubleList; import jsat.utils.random.RandomUtil; import jsat.utils.random.XORWOW; /** * Online Sparse Kernel Learning by Sampling and Smooth Losses (OSKL) is an * online algorithm for learning sparse kernelized solutions to binary * classification problems. The number of support vectors is controlled by a a * sparsity parameter {@link #setG(double) G} and a specified * {@link LossC loss function}. The number of support vectors is bounded by the * cumulative loss of the loss function used. <br> * <br> * The OSKL algorithm is designed for use with smooth loss functions such as * the {@link LogisticLoss logistic loss}. However, it can work with non-smooth * loss functions such as the {@link HingeLoss hinge loss}. <br> * <br> * See: Zhang, L., Yi, J., Jin, R., Lin, M.,&amp;He, X. (2013). <i>Online Kernel * Learning with a Near Optimal Sparsity Bound</i>. In S. Dasgupta&amp;D. * Mcallester (Eds.), Proceedings of the 30th International Conference on * Machine Learning (ICML-13) (Vol. 28, pp. 621–629). JMLR Workshop and * Conference Proceedings. * * @author Edward Raff */ public class OSKL extends BaseUpdateableClassifier implements BinaryScoreClassifier, Parameterized { private static final long serialVersionUID = 4207594016856230134L; @ParameterHolder private KernelTrick k; private double eta; private double R; private double G; private double curSqrdNorm; private LossC lossC; private boolean useAverageModel = true; //Data used for capturing the average private int t; /** * Last time alphaAverage was updated */ private int last_t; private int burnIn; /** * Store the average of the weights over time */ private DoubleList alphaAveraged; private List<Vec> vecs; private DoubleList alphas; private DoubleList inputKEvals; private List<Double> accelCache; private Random rand; /** * Creates a new OSKL learner using the {@link LogisticLoss}. 
The parameters * {@link #setG(double) } and {@link #setEta(double) } are set based on the * original papers suggestions to produced a less sparse model that should * be more accurate * * @param k the kernel to use * @param R the maximum allowed norm for the model */ public OSKL(KernelTrick k, double R) { this(k, 0.9, 1, R); } /** * Creates a new OSKL learner using the {@link LogisticLoss} * @param k the kernel to use * @param eta the learning rate to use * @param G the sparsification parameter * @param R the maximum allowed norm for the model */ public OSKL(KernelTrick k, double eta, double G, double R) { this(k, eta, G, R, new LogisticLoss()); } /** * Creates a new OSKL learner * @param k the kernel to use * @param eta the learning rate to use * @param G the sparsification parameter * @param R the maximum allowed norm for the model * @param lossC the loss function to use */ public OSKL(KernelTrick k, double eta, double G, double R, LossC lossC) { setKernel(k); setEta(eta); setR(R); setG(G); this.lossC = lossC; } /** * Copy constructor * @param toCopy the object to copy */ public OSKL(OSKL toCopy) { this.k = toCopy.k.clone(); this.eta = toCopy.eta; this.R = toCopy.R; this.G = toCopy.G; this.curSqrdNorm = toCopy.curSqrdNorm; this.lossC = toCopy.lossC.clone(); this.t = toCopy.t; this.last_t = toCopy.last_t; this.useAverageModel = toCopy.useAverageModel; this.burnIn = toCopy.burnIn; if(toCopy.vecs != null) { this.vecs = new ArrayList<Vec>(); for(Vec v : toCopy.vecs) this.vecs.add(v.clone()); this.alphas = new DoubleList(toCopy.alphas); this.alphaAveraged = new DoubleList(toCopy.alphaAveraged); this.inputKEvals = new DoubleList(toCopy.inputKEvals); } if(toCopy.accelCache != null) this.accelCache = new DoubleList(toCopy.accelCache); this.rand = RandomUtil.getRandom(); } /** * Sets the kernel to use * @param k the kernel to use */ public void setKernel(KernelTrick k) { this.k = k; } /** * Returns the kernel to use * @return the kernel to use */ public KernelTrick getKernel() { return k; } /** * Sets the learning rate to use for training. The original paper suggests * setting &eta; = 0.9/{@link #setG(double) G} * @param eta the positive learning rate to use */ public void setEta(double eta) { if(eta <= 0 || Double.isNaN(eta) || Double.isInfinite(eta)) throw new IllegalArgumentException("Eta must be positive, not " + eta); this.eta = eta; } /** * Returns the learning rate in use * @return the learning rate in use */ public double getEta() { return eta; } /** * Sets the sparsification parameter G. Increasing G reduces the number of * updates to the model, which increases sparsity but may reduce accuracy. * Decreasing G increases the update rate reducing sparsity. The original * paper tests values of G &isin; {1, 2, 4, 10} * @param G the sparsification parameter in [1, &infin;) */ public void setG(double G) { if(G < 1 || Double.isInfinite(G) || Double.isNaN(G)) throw new IllegalArgumentException("G must be in [1, Infinity), not " + G); this.G = G; } /** * Returns the sparsification parameter * @return the sparsification parameter */ public double getG() { return G; } /** * Guesses the distribution to use for the R parameter * * @param d the dataset to get the guess for * @return the guess for the R parameter * @see #setR(double) */ public static Distribution guessR(DataSet d) { return new LogUniform(1, 1e5); } /** * Sets the maximum allowed norm of the model. The * original paper suggests values in the range 10<sup>x</sup> for <i>x</i> * &isin; {0, 1, 2, 3, 4, 5}. 
* @param R the maximum allowed norm for the model */ public void setR(double R) { if(R <= 0 || Double.isNaN(R) || Double.isInfinite(R)) throw new IllegalArgumentException("R must be positive, not " + R); this.R = R; } /** * Returns the maximum allowed norm for the model learned * @return the maximum allowed norm for the model learned */ public double getR() { return R; } /** * Sets whether or not the average of all intermediate models is used or if * the most recent model is used when performing classification * @param useAverageModel {@code true} to use the average model, * {@code false} to use the last model update */ public void setUseAverageModel(boolean useAverageModel) { this.useAverageModel = useAverageModel; } /** * Returns {@code true} if the average of all models is being used, or * {@code false} if the last model is used * @return {@code true} if the average of all models is being used, or * {@code false} if the last model is used */ public boolean isUseAverageModel() { return useAverageModel; } /** * Sets the number of update calls to consider as part of the "burn in" * phase. The averaging of the model will not start until after the burn in * phase. <br> * If the classification or score is requested before the burn in phase is * completed, the latest model will be used as is. * @param burnIn the number of updates to ignore before averaging. Must be * non negative. */ public void setBurnIn(int burnIn) { if(burnIn < 0) throw new IllegalArgumentException("Burn in must be non negative, not " + burnIn); this.burnIn = burnIn; } /** * Returns the number of burn in rounds * @return the number of burn in rounds */ public int getBurnIn() { return burnIn; } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { rand = RandomUtil.getRandom(); vecs = new ArrayList<Vec>(); alphas = new DoubleList(); alphaAveraged = new DoubleList(); t = 0; last_t = 0; inputKEvals = new DoubleList(); if(k.supportsAcceleration()) accelCache = new DoubleList(); else accelCache = null; curSqrdNorm = 0; } /** * Returns the number of data points accepted as support vectors * @return the number of support vectors in the model */ public int getSupportVectorCount() { if(vecs == null) return 0; else return vecs.size(); } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { final Vec x_t = dataPoint.getNumericalValues(); final List<Double> qi = k.getQueryInfo(x_t); final double score = scoreSaveEval(x_t, qi); final double y_t = targetClass*2-1; //4: Compute the derivative ℓ′(yt, ft(xt)) final double lossD = lossC.getDeriv(score, y_t); t++; // Step 5: Sample a binary random variable Zt with if(rand.nextDouble() > Math.abs(lossD)/G) return;//"failed", no update final double alpha_t = -eta*Math.signum(lossD)*G; //Update the squared norm curSqrdNorm += alpha_t*alpha_t*inputKEvals.getD(0); for(int i = 0; i < alphas.size(); i++) curSqrdNorm += 2*alpha_t*alphas.getD(i)*inputKEvals.getD(i+1); //add values alphas.add(alpha_t); vecs.add(x_t); if(accelCache != null) accelCache.addAll(qi); //update online alpha averages for current & old SVs alphaAveraged.add(0.0);//implicit zero for time we didn't have new SVs updateAverage(); //project alphas to maintain norm if needed if(curSqrdNorm > R*R) { double coeff = R/Math.sqrt(curSqrdNorm); alphas.getVecView().mutableMultiply(coeff); curSqrdNorm *= coeff*coeff; } }; private double score(Vec x, List<Double> qi) { DoubleList alphToUse; if(useAverageModel && t > burnIn) { updateAverage(); 
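/* past the burn-in phase, score with the running time-average of the coefficients rather than the most recent alphas */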
alphToUse = alphaAveraged; } else alphToUse = alphas; return k.evalSum(vecs, accelCache, alphToUse.getBackingArray(), x, qi, 0, alphToUse.size()); } /** * Computes the score and saves the results of the kernel computations in * {@link #inputKEvals}. The first value in the list will be the self kernel * product * @param x the input vector * @param qi the query information for the vector * @return the dot product in the kernel space */ private double scoreSaveEval(Vec x, List<Double> qi) { inputKEvals.clear(); inputKEvals.add(k.eval(0, 0, Arrays.asList(x), qi)); double sum = 0; for(int i = 0; i < alphas.size(); i++) { double k_ix = k.eval(i, x, qi, vecs, accelCache); inputKEvals.add(k_ix); sum += alphas.getD(i)*k_ix; } return sum; } @Override public CategoricalResults classify(DataPoint data) { Vec x = data.getNumericalValues(); return lossC.getClassification(score(x, k.getQueryInfo(x))); } @Override public boolean supportsWeightedData() { return false; } @Override public double getScore(DataPoint dp) { Vec x = dp.getNumericalValues(); return score(x, k.getQueryInfo(x)); } @Override public OSKL clone() { return new OSKL(this); } /** * Updates the average model to reflect the current time average */ private void updateAverage() { if(t == last_t || t < burnIn) return; else if(last_t < burnIn)//first update since done burning { for(int i = 0; i < alphaAveraged.size(); i++) alphaAveraged.set(i, alphas.get(i)); } double w = t-last_t;//time elapsed for(int i = 0; i < alphaAveraged.size(); i++) { double delta = alphas.getD(i) - alphaAveraged.getD(i); alphaAveraged.set(i, alphaAveraged.getD(i)+delta*w/t); } last_t = t;//average done } }
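The heart of OSKL's update() above is the Bernoulli sampling step: an update is only performed with probability |loss'| / G, and when it is, the new coefficient is -eta * sign(loss') * G, followed by a projection back onto the ball of radius R if the model norm has grown too large. The sketch below walks through one such step with plain scalars and a logistic loss; it assumes k(x, x) = 1, and the names are illustrative rather than JSAT API.

import java.util.Random;

public class OsklSampledUpdateSketch
{
    public static void main(String[] args)
    {
        double eta = 0.9, G = 1.0, R = 10.0;
        double score = -0.2;    // current decision value f(x)
        double y = +1;          // label in {-1, +1}

        // derivative of the logistic loss log(1 + exp(-y f)) with respect to f
        double lossD = -y / (1 + Math.exp(y * score));

        Random rand = new Random(42);
        if (rand.nextDouble() <= Math.abs(lossD) / G)
        {
            double alpha_t = -eta * Math.signum(lossD) * G;  // coefficient of the new support vector
            double sqrdNorm = alpha_t * alpha_t;             // assumes k(x, x) = 1
            if (sqrdNorm > R * R)                            // project back onto the R-ball
                alpha_t *= R / Math.sqrt(sqrdNorm);
            System.out.println("accepted update, alpha_t = " + alpha_t);
        }
        else
            System.out.println("skipped update (this is where the sparsity comes from)");
    }
}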
13,583
30.084668
113
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/linear/kernelized/Projectron.java
package jsat.classifiers.linear.kernelized; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import jsat.classifiers.BaseUpdateableClassifier; import jsat.classifiers.CategoricalData; import jsat.classifiers.CategoricalResults; import jsat.classifiers.DataPoint; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.classifiers.linear.PassiveAggressive; import jsat.classifiers.neuralnetwork.Perceptron; import jsat.distributions.kernels.KernelTrick; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseMatrix; import jsat.linear.DenseVector; import jsat.linear.Matrix; import jsat.linear.SubMatrix; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameter.ParameterHolder; import jsat.parameters.Parameterized; import jsat.utils.DoubleList; /** * An implementation of the Projectron and Projectrion++ algorithms. These are a * kernelized extensions of the {@link Perceptron} that bound the number of * support vectors used, with the latter incorporating some similarities from * {@link PassiveAggressive}. <br> * Unlike many other bounded kernel learners, the number of support vectors is * not specified by the user. This value is controlled by a sparsity parameter * {@link #setEta(double) }. * <br><br> * See: * <ul> * <li>Orabona, F., Keshet, J.,&amp;Caputo, B. (2008). <i>The Projectron: a * bounded kernel-based Perceptron</i>. Proceedings of the 25th international * conference on Machine learning - ICML ’08 (pp. 720–727). New York, New York, * USA: ACM Press. doi:10.1145/1390156.1390247</li> * <li>Orabona, F., Keshet, J.,&amp;Caputo, B. (2009). <i>Bounded Kernel-Based * Online Learning</i>. The Journal of Machine Learning Research, 10, 2643–2666. * </li> * </ul> * * @author Edward Raff */ public class Projectron extends BaseUpdateableClassifier implements BinaryScoreClassifier, Parameterized { private static final long serialVersionUID = -4025799790045954359L; @ParameterHolder private KernelTrick k; private double eta; /** * Marked as "d" in the original papers */ private DoubleList alpha; private List<Vec> S; private List<Double> cacheAccel; private Matrix InvK; private Matrix InvKExpanded; private double[] k_raw; private boolean useMarginUpdates; /** * Creates a new Projectron++ learner * * @param k the kernel to use */ public Projectron(KernelTrick k) { this(k, 0.1); } /** * Creates a new Projectron++ learner * * @param k the kernel to use * @param eta the sparsity parameter */ public Projectron(KernelTrick k, double eta) { this(k, eta, true); } /** * Creates a new Projectron learner * * @param k the kernel to use * @param eta the sparsity parameter * @param useMarginUpdates whether or not to perform projection updates on * margin errors */ public Projectron(KernelTrick k, double eta, boolean useMarginUpdates) { setKernel(k); setEta(eta); setUseMarginUpdates(useMarginUpdates); } /** * Copy constructor * * @param toCopy the object to copy */ protected Projectron(Projectron toCopy) { this.k = toCopy.k.clone(); this.eta = toCopy.eta; if (toCopy.S != null) { this.alpha = new DoubleList(toCopy.alpha); this.S = new ArrayList<Vec>(toCopy.S); this.cacheAccel = new DoubleList(toCopy.cacheAccel); this.InvKExpanded = toCopy.InvKExpanded.clone(); this.InvK = new SubMatrix(this.InvKExpanded, 0, 0, toCopy.InvK.rows(), toCopy.InvK.cols()); this.k_raw = Arrays.copyOf(toCopy.k_raw, toCopy.k_raw.length); } } /** * Sets the kernel trick to be used * * @param k the kernel trick to be use */ public void setKernel(KernelTrick 
k) { this.k = k; } /** * Returns the kernel trick in use * * @return the kernel trick in use */ public KernelTrick getKernel() { return k; } /** * Sets the &eta; parameter which controls the sparsity of the Projection * solution. Larger values result in greater sparsity, at the potential loss * of accuracy. If set to 0 and {@link #setUseMarginUpdates(boolean) } is * {@code false}, the Projectron degenerates into the standard kernelized * Perceptron. * * @param eta the sparsity parameter in [0, Infinity) */ public void setEta(double eta) { if (Double.isNaN(eta) || Double.isInfinite(eta) || eta < 0) throw new IllegalArgumentException("eta must be in the range [0, Infity), not " + eta); this.eta = eta; } /** * Returns the sparsity parameter value * * @return the sparsity parameter value */ public double getEta() { return eta; } /** * Sets whether or not projection updates will be performed for margin * errors. If {@code true}, this behaves as the Projectrion++ algorithm. If * {@code false}, the behavior is equal to the standard Projectron. * * @param useMarginUpdates {@code true} to perform updates on margin errors */ public void setUseMarginUpdates(boolean useMarginUpdates) { this.useMarginUpdates = useMarginUpdates; } /** * Returns {@code true} if margin errors can cause updates, {@code false} if * not. * * @return {@code true} if margin errors can cause updates, {@code false} if * not. */ public boolean isUseMarginUpdates() { return useMarginUpdates; } @Override public Projectron clone() { return new Projectron(this); } @Override public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting) { if (numericAttributes < 1) throw new IllegalArgumentException("Projectrion requires numeric features"); else if (predicting.getNumOfCategories() != 2) throw new FailedToFitException("Projectrion only supports binary classification"); final int initSize = 50; alpha = new DoubleList(initSize); cacheAccel = new DoubleList(initSize); S = new ArrayList<Vec>(initSize); InvKExpanded = new DenseMatrix(initSize, initSize); k_raw = new double[initSize]; } @Override public void update(DataPoint dataPoint, double weight, int targetClass) { final Vec x_t = dataPoint.getNumericalValues(); final List<Double> qi = k.getQueryInfo(x_t); final double score = getScore(x_t, qi, k_raw); final double y_t = targetClass * 2 - 1; //First instance is a special case if (S.isEmpty()) { InvK = new SubMatrix(InvKExpanded, 0, 0, 1, 1); InvK.set(0, 0, 1.0); S.add(x_t); alpha.add(y_t); cacheAccel.addAll(qi); return; } else if (y_t * score > 1)//No updates needed return; else if (y_t * score < 1 && y_t * score > 0 && !useMarginUpdates)//margin error but we are ignoring it return; //Used for both cases, so hoisted out. 
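/* project x_t onto the span of the current support set: d = K^-1 * k_t, with the squared residual delta^2 = k(x_t, x_t) - k_t . d computed just below */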
DenseVector k_t = new DenseVector(k_raw, 0, S.size()); Vec d = InvK.multiply(k_t);//equation (7) final double k_xt = k.eval(0, 0, Arrays.asList(x_t), qi); final double k_t_d = k_t.dot(d); final double deltaSqrd = Math.max(k_xt - k_t_d, 0);//avoid sqrt(-val) bellow final double delta = Math.sqrt(deltaSqrd); if (Math.signum(score) != y_t) { if (delta < eta)//Project to the basis vectors { //equation (9) for (int i = 0; i < S.size(); i++) alpha.set(i, alpha.get(i) + y_t * d.get(i)); } else//Add to the basis vectors { //Make sure we have space if (S.size() == InvKExpanded.rows()) { //SubMatrix InvK holds refrence to old one with the correct values InvKExpanded = new DenseMatrix(S.size() * 2, S.size() * 2); for (int i = 0; i < InvK.rows(); i++) for (int j = 0; j < InvK.cols(); j++) InvKExpanded.set(i, j, InvK.get(i, j)); InvK = new SubMatrix(InvKExpanded, 0, 0, S.size(), S.size()); k_raw = Arrays.copyOf(k_raw, S.size() * 2); } //Now back to normal InvK = new SubMatrix(InvKExpanded, 0, 0, S.size() + 1, S.size() + 1); Vec dExp = new DenseVector(S.size() + 1); for (int i = 0; i < d.length(); i++) dExp.set(i, d.get(i)); dExp.set(S.size(), -1); if (deltaSqrd > 0) Matrix.OuterProductUpdate(InvK, dExp, dExp, 1 / deltaSqrd); S.add(x_t); alpha.add(y_t); cacheAccel.addAll(qi); } } else if (y_t * score <= 1)//(margin error) { final double loss = 1 - y_t * score;//y_t*score must be in (0, 1), so no checks needed if (loss < delta / eta) return; //see page 2655 double tau = Math.max(Math.max(loss / k_t_d, 2 * (loss - delta / eta) / k_t_d), 1.0); for (int i = 0; i < S.size(); i++) alpha.set(i, alpha.get(i) + y_t * tau * d.get(i)); } } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(2); if (getScore(data) < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public boolean supportsWeightedData() { return false; } private double getScore(Vec x, List<Double> qi, final double[] kStore) { double score = 0; for (int i = 0; i < S.size(); i++) { double tmp = k.eval(i, x, qi, S, cacheAccel); if (kStore != null) kStore[i] = tmp; score += alpha.get(i) * tmp; } return score; } @Override public double getScore(DataPoint dp) { return k.evalSum(S, cacheAccel, alpha.getBackingArray(), dp.getNumericalValues(), 0, S.size()); } }
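The decision between "project onto the existing support set" and "grow the support set" in update() above hinges on the projection residual delta. With a single stored vector the linear algebra collapses to scalars, which the self-contained sketch below uses to show the projection branch firing (dot-product kernel, hypothetical names; not part of JSAT).

public class ProjectronResidualSketch
{
    static double dot(double[] a, double[] b)
    {
        double r = 0;
        for (int i = 0; i < a.length; i++)
            r += a[i] * b[i];
        return r;
    }

    public static void main(String[] args)
    {
        double eta = 0.1;                 // sparsity parameter, as in setEta
        double[] s1 = {1.0, 0.0};         // the single stored support vector
        double alpha1 = 1.0;              // its current coefficient
        double[] x_t = {0.99, 0.05};      // new, nearly collinear mistake
        double y_t = -1;

        double invK = 1.0 / dot(s1, s1);  // 1x1 inverse kernel matrix
        double k_t = dot(x_t, s1);
        double d = invK * k_t;            // projection coefficient, equation (7)
        double delta = Math.sqrt(Math.max(dot(x_t, x_t) - k_t * d, 0));

        if (delta < eta)
            alpha1 += y_t * d;            // absorb the mistake, support set unchanged
        else
            System.out.println("delta too large: x_t would become a new support vector");
        System.out.println("delta = " + delta + ", alpha1 = " + alpha1);
    }
}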
10,643
31.1571
113
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/BackPropagationNet.java
package jsat.classifiers.neuralnetwork; import java.io.Serializable; import static java.lang.Math.*; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Random; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.distributions.Normal; import jsat.linear.DenseMatrix; import jsat.linear.DenseVector; import jsat.linear.Matrix; import jsat.linear.Vec; import jsat.math.Function1D; import jsat.math.decayrates.DecayRate; import jsat.math.decayrates.ExponetialDecay; import jsat.parameters.IntParameter; import jsat.parameters.ObjectParameter; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.regression.RegressionDataSet; import jsat.regression.Regressor; import jsat.utils.IntList; import jsat.utils.ListUtils; import jsat.utils.random.RandomUtil; /** * An implementation of a Feed Forward Neural Network (NN) trained by Back * Propagation. NNs are powerful classifiers and regressors, but can suffer from * slow training time and overfitting. <br> * <br> * NOTE: This class should generally not be used any more. The * {@link DReDNetSimple} provides an easier to use class for most cases that * will likely converge faster. * * @author Edward Raff */ public class BackPropagationNet implements Classifier, Regressor, Parameterized { private static final long serialVersionUID = 335438198218313862L; private int inputSize, outputSize; private ActivationFunction f = softsignActiv; private DecayRate learningRateDecay = new ExponetialDecay(); private double momentum = 0.1; private double weightDecay = 0; private int epochs = 1000; private double initialLearningRate = 0.2; private WeightInitialization weightInitialization = WeightInitialization.TANH_NORMALIZED_INITIALIZATION; private double targetBump = 0.1; private int batchSize = 10; /** * Length of the array determines how many layers of hidden units. Value at * each index determines how many neurons are in each hidden layer. */ private int[] npl; /** * Matrix of weights for each hidden layer and output layer */ private List<Matrix> Ws; /** * Bias terms corresponding to each layer */ private List<Vec> bs; /** * Target min and max and scaling multiplier for regression problems to make * the target into a range that the activation function can reach */ private double targetMax, targetMin, targetMultiplier; /** * Creates a new back propagation network with one hidden layer of 1024 neurons. * @param npl the array of hidden layer information. The length indicates * how many hidden layers, and the value of each index indicates how many * neurons to place in each hidden layer */ public BackPropagationNet() { this(1024); } /** * Creates a new back propagation network. * @param npl the array of hidden layer information. The length indicates * how many hidden layers, and the value of each index indicates how many * neurons to place in each hidden layer */ public BackPropagationNet(final int... 
npl ) { if(npl.length < 1) throw new IllegalArgumentException("There must be at least one hidden layer"); this.npl = npl; } /** * Copy constructor * @param toClone the one to copy */ protected BackPropagationNet(BackPropagationNet toClone) { this(Arrays.copyOf(toClone.npl, toClone.npl.length)); this.inputSize = toClone.inputSize; this.outputSize = toClone.outputSize; this.f = toClone.f; this.momentum = toClone.momentum; this.weightDecay = toClone.weightDecay; this.epochs = toClone.epochs; this.initialLearningRate = toClone.initialLearningRate; this.learningRateDecay = toClone.learningRateDecay; this.weightInitialization = toClone.weightInitialization; this.targetBump = toClone.targetBump; this.targetMax = toClone.targetMax; this.targetMin = toClone.targetMin; this.targetMultiplier = toClone.targetMultiplier; this.batchSize = toClone.batchSize; if(toClone.Ws != null) { this.Ws = new ArrayList<Matrix>(toClone.Ws); for(int i = 0; i < this.Ws.size(); i++) this.Ws.set(i, this.Ws.get(i).clone()); } if(toClone.bs != null) { this.bs = new ArrayList<Vec>(toClone.bs); for(int i = 0; i < this.bs.size(); i++) this.bs.set(i, this.bs.get(i).clone()); } } /** * The main work for training the neural network * @param dataSet the data set to train from */ private void trainNN(DataSet dataSet) { //batchSize List<List<Vec>> activations = new ArrayList<List<Vec>>(batchSize); List<List<Vec>> derivatives = new ArrayList<List<Vec>>(batchSize); List<List<Vec>> deltas = new ArrayList<List<Vec>>(batchSize); List<Matrix> updates = new ArrayList<Matrix>(Ws.size()); List<Vec> cur_x = new ArrayList<Vec>(batchSize); List<Vec> prev_x = new ArrayList<Vec>(batchSize); for(int i = 0; i < batchSize; i++) { activations.add(new ArrayList<Vec>(Ws.size())); derivatives.add(new ArrayList<Vec>(Ws.size())); deltas.add(new ArrayList<Vec>(Ws.size())); for(Matrix w : Ws) { int L = w.rows(); activations.get(i).add(new DenseVector(L)); derivatives.get(i).add(new DenseVector(L)); deltas.get(i).add(new DenseVector(L)); if(i == 0) updates.add(new DenseMatrix(w.rows(), w.cols())); } } IntList iterOrder = new IntList(dataSet.size()); ListUtils.addRange(iterOrder, 0, dataSet.size(), 1); final double bSizeInv = 1.0/batchSize; for(int epoch = 0; epoch < epochs; epoch++) { Collections.shuffle(iterOrder); final double eta = learningRateDecay.rate(epoch, epochs, initialLearningRate);//learningRate; double error = 0.0; for(int iter = 0; iter < dataSet.size(); iter+=batchSize) { if(dataSet.size() - iter < batchSize) continue;//we have run out of enough sampels to do an update cur_x.clear(); //Feed batches thought network and get final mistakes for(int bi = 0; bi < batchSize; bi++) { final int idx = iterOrder.get(iter+bi); Vec x = dataSet.getDataPoint(idx).getNumericalValues(); cur_x.add(x); feedForward(x, activations.get(bi), derivatives.get(bi)); //Compution of Deltas Vec delta_out = deltas.get(bi).get(npl.length); Vec a_i = activations.get(bi).get(npl.length); Vec d_i = derivatives.get(bi).get(npl.length); error += computeOutputDelta(dataSet, idx, delta_out, a_i, d_i); } //Propigate the collected errors back for(int bi = 0; bi < batchSize; bi++) { for(int i = Ws.size()-2; i >= 0; i--) { Vec delta = deltas.get(bi).get(i); delta.zeroOut(); Matrix W = Ws.get(i+1); W.transposeMultiply(1, deltas.get(bi).get(i+1), delta); delta.mutablePairwiseMultiply(derivatives.get(bi).get(i)); } //Apply weight changes for(int i = 1; i < Ws.size(); i++) { Matrix W = Ws.get(i); Vec b = bs.get(i); W.mutableSubtract(eta*weightDecay, W); if(momentum != 0) { Matrix update = 
updates.get(i); update.mutableMultiply(momentum); Matrix.OuterProductUpdate(update, deltas.get(bi).get(i), activations.get(bi).get(i-1), -eta*bSizeInv); W.mutableAdd(update); } else//update directly { Matrix.OuterProductUpdate(W, deltas.get(bi).get(i), activations.get(bi).get(i-1), -eta*bSizeInv); } b.mutableAdd(-eta*bSizeInv, deltas.get(bi).get(i)); } //input layer Matrix W = Ws.get(0); W.mutableSubtract(eta*weightDecay, W); Vec b = bs.get(0); if(momentum != 0) { Matrix update = updates.get(0); update.mutableMultiply(momentum); Matrix.OuterProductUpdate(update, deltas.get(bi).get(0), cur_x.get(bi), -eta*bSizeInv); W.mutableAdd(update); } else//update directly { Matrix.OuterProductUpdate(W, deltas.get(bi).get(0), cur_x.get(bi), -eta*bSizeInv); } b.mutableAdd(-eta*bSizeInv, deltas.get(bi).get(0)); } } } } /** * Different methods of initializing the weight values before training */ public enum WeightInitialization { UNIFORM { @Override public double getWeight(int inputSize, int layerSize, double eta, Random rand) { return rand.nextDouble()*1.4-0.7; } }, GUASSIAN { @Override public double getWeight(int inputSize, int layerSize, double eta, Random rand) { return Normal.invcdf(rand.nextDouble(), 0, pow(inputSize, -0.5)); } }, TANH_NORMALIZED_INITIALIZATION { @Override public double getWeight(int inputSize, int layerSize, double eta, Random rand) { double cnst = sqrt(6.0/(inputSize+layerSize)); return rand.nextDouble()*cnst*2-cnst; } }; /** * * @param inputSize also referred to as the fan<sub>in</sub> * @param layerSize also referred to as the fan<sub>out</sub> * @param eta the initial learning rate * @param rand the source of randomness * @return one weight value */ abstract public double getWeight(int inputSize, int layerSize, double eta, Random rand); } /** * Sets the non negative momentum used in training. * @param momentum the momentum to apply to training */ public void setMomentum(double momentum) { if(momentum < 0 || Double.isNaN(momentum) || Double.isInfinite(momentum)) throw new ArithmeticException("Momentum must be non negative, not " + momentum); this.momentum = momentum; } /** * Returns the momentum in use * @return the momentum */ public double getMomentum() { return momentum; } /** * Sets the initial learning rate used for the first epoch * @param initialLearningRate the positive learning rate to use */ public void setInitialLearningRate(double initialLearningRate) { if(initialLearningRate <= 0 || Double.isNaN(initialLearningRate) || Double.isInfinite(initialLearningRate)) throw new ArithmeticException("Learning rate must be a positive cosntant, not " + initialLearningRate ); this.initialLearningRate = initialLearningRate; } /** * Returns the learning rate used * @return the learning rate used */ public double getInitialLearningRate() { return initialLearningRate; } /** * Sets the decay rate used to adjust the learning rate after each epoch * @param learningRateDecay the decay for the learning rate */ public void setLearningRateDecay(DecayRate learningRateDecay) { this.learningRateDecay = learningRateDecay; } /** * Returns the decay rate used to adjust the learning rate after each epoch * @return the decay rate used for learning */ public DecayRate getLearningRateDecay() { return learningRateDecay; } /** * Sets the number of epochs of training used. Each epoch goes through the * whole data set once. 
* @param epochs the number of training epochs */ public void setEpochs(int epochs) { if(epochs < 1) throw new ArithmeticException("number of training epochs must be positive, not " + epochs); this.epochs = epochs; } /** * Returns the number of epochs of training epochs for learning * @return the number of training epochs */ public int getEpochs() { return epochs; } /** * Sets the weight decay used for each update. The weight decay must be in * the range [0, 1). Weight decay values must often be very small, often * 1e-8 or less. * * @param weightDecay the weight decay to apply when training */ public void setWeightDecay(double weightDecay) { if(weightDecay < 0 || weightDecay >= 1 || Double.isNaN(weightDecay)) throw new ArithmeticException("Weight decay must be in [0,1), not " + weightDecay); this.weightDecay = weightDecay; } /** * Returns the weight decay used for each update * @return the weight decay used. */ public double getWeightDecay() { return weightDecay; } /** * Sets how the weights are initialized before training starts * @param weightInitialization the method of weight initialization */ public void setWeightInitialization(WeightInitialization weightInitialization) { this.weightInitialization = weightInitialization; } /** * Returns the method of weight initialization used * @return the method of weight initialization used */ public WeightInitialization getWeightInitialization() { return weightInitialization; } /** * Sets the batch size use to estimate the gradient of the error for * training * @param batchSize the number of training instances to use on each update */ public void setBatchSize(int batchSize) { this.batchSize = batchSize; } /** * Returns the training batch size * @return the batch size used for training */ public int getBatchSize() { return batchSize; } /** * Sets the activation function used for the network * @param f the activation function to use */ public void setActivationFunction(ActivationFunction f) { this.f = f; } /** * Returns the activation function used for training the network * @return the activation function in use */ public ActivationFunction getActivationFunction() { return f; } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(outputSize); Vec x = feedForward(data.getNumericalValues()); x.mutableSubtract(f.min()+targetBump); for(int i = 0; i < x.length(); i++) cr.setProb(i, Math.max(x.get(i), 0)); cr.normalize(); return cr; } @Override public double regress(DataPoint data) { Vec x = feedForward(data.getNumericalValues()); double val = x.get(0); val = (val - f.min()-targetBump)/targetMultiplier+targetMin; return val; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { inputSize = dataSet.getNumNumericalVars(); outputSize = dataSet.getClassSize(); Random rand = RandomUtil.getRandom(); setUp(rand); trainNN(dataSet); } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { targetMax = Double.NEGATIVE_INFINITY; targetMin = Double.POSITIVE_INFINITY; for(int i = 0; i < dataSet.size(); i++) { double val = dataSet.getTargetValue(i); targetMax = Math.max(targetMax, val); targetMin = Math.min(targetMin, val); } targetMultiplier = ((f.max()-targetBump)-(f.min()+targetBump))/(targetMax-targetMin); inputSize = dataSet.getNumNumericalVars(); outputSize = 1; Random rand = RandomUtil.getRandom(); 
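/* initialize the layer weights and biases, then fit with mini-batch back propagation */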
setUp(rand); trainNN(dataSet); } @Override public boolean supportsWeightedData() { return false; } @Override public BackPropagationNet clone() { return new BackPropagationNet(this); } /** * The neural network needs an activation function for the neurons that is * used to predict from inputs and train the network by propagating the * errors back through the network. */ public static abstract class ActivationFunction implements Function1D, Serializable { private static final long serialVersionUID = 8002040194215453918L; /** * Computes the response of the response of this activation function on * the given input value * @param x the input value * @return the response value */ abstract public double response(double x); /** * The minimum possible response value * @return the min value */ abstract public double min(); /** * The maximum possible response value * @return the max value */ abstract public double max(); /** * Returns the function object for the derivative of this activation * function. The derivative must be calculated using only the output of * the response when given the original input. Meaning: given an input * {@code x}, the value of f'(x) must be computable as g(f(x)) * * @return the function for computing the derivative of the response */ abstract public Function1D getD(); @Override public double f(double x) { return response(x); } } /** * The logit activation function. This function goes from [0, 1]. It has * more difficultly learning than symmetric activation functions, often * requiring considerably more layers and neurons than other activation * functions. */ public static final ActivationFunction logitActiv = new ActivationFunction() { private static final long serialVersionUID = -5675881412853268432L; @Override public double response(double x) { return 1 / (1+exp(-x)); } @Override public double min() { return 0; } @Override public double max() { return 1; } @Override public Function1D getD() { return logitPrime; } @Override public String toString() { return "Logit"; } }; private static final Function1D logitPrime = (double x) -> { return x * (1 - x); }; /** * The tanh activation function. This function is symmetric in the range of * [-1, 1]. It works well for many problems in general. */ public static final ActivationFunction tanhActiv = new ActivationFunction() { private static final long serialVersionUID = 5531922338473526216L; @Override public double response(double x) { return tanh(x); } @Override public double min() { return -1; } @Override public double max() { return 1; } @Override public Function1D getD() { return (x)-> 1-x*x; } @Override public String toString() { return "Tanh"; } }; /** * The softsign activation function. This function is symmetric in the range * of [-1, 1]. It works well for classification problems, and is very fast * to compute. It sometimes requires more neurons to learn more complicated * functions / boundaries. 
It sometimes has reduced performance in regression */ public static final ActivationFunction softsignActiv = new ActivationFunction() { private static final long serialVersionUID = 1618447580574194519L; @Override public double response(double x) { return x/(1.0 + abs(x)); } @Override public double min() { return -1; } @Override public double max() { return 1; } @Override public Function1D getD() { return (x) -> Math.pow(1-abs(x), 2); } @Override public String toString() { return "Softsign"; } }; /** * Creates the weights for the hidden layers and output layer * @param rand source of randomness */ private void setUp(Random rand) { Ws = new ArrayList<>(npl.length); bs = new ArrayList<>(npl.length); //First Hiden layer takes input raw DenseMatrix W = new DenseMatrix(npl[0], inputSize); Vec b = new DenseVector(W.rows()); initializeWeights(W, rand); initializeWeights(b, W.cols(), rand); Ws.add(W); bs.add(b); //Other Hiden Layers Layers for(int i = 1; i < npl.length; i++) { W = new DenseMatrix(npl[i], npl[i-1]); b = new DenseVector(W.rows()); initializeWeights(W, rand); initializeWeights(b, W.cols(), rand); Ws.add(W); bs.add(b); } //Output layer W = new DenseMatrix(outputSize, npl[npl.length-1]); b = new DenseVector(W.rows()); initializeWeights(W, rand); initializeWeights(b, W.cols(), rand); Ws.add(W); bs.add(b); } /** * Computes the delta between the networks output for a same and its true value * @param dataSet the data set we are learning from * @param idx the index into the data set for the current data point * @param delta_out the place to store the delta, may already be initialized with random noise * @param a_i the activation of the final output layer for the data point * @param d_i the derivative of the activation of the final output layer * @return the error that occurred in predicting this data point */ private double computeOutputDelta(DataSet dataSet, final int idx, Vec delta_out, Vec a_i, Vec d_i) { double error = 0; if (dataSet instanceof ClassificationDataSet) { ClassificationDataSet cds = (ClassificationDataSet) dataSet; final int ct = cds.getDataPointCategory(idx); for (int i = 0; i < outputSize; i++) if (i == ct) delta_out.set(i, f.max() - targetBump); else delta_out.set(i, f.min() + targetBump); for (int j = 0; j < delta_out.length(); j++) { double val = delta_out.get(j); error += pow((val - a_i.get(j)), 2); val = -(val - a_i.get(j)) * d_i.get(j); delta_out.set(j, val); } } else if(dataSet instanceof RegressionDataSet) { RegressionDataSet rds = (RegressionDataSet) dataSet; double val = rds.getTargetValue(idx); val = f.min()+targetBump + targetMultiplier*(val-targetMin); error += pow((val - a_i.get(0)), 2); delta_out.set(0, -(val - a_i.get(0)) * d_i.get(0)); } else { throw new RuntimeException("BUG: please report"); } return error; } /** * Feeds a vector through the network to get an output * @param input the input to feed forward though the network * @param activations the list of allocated vectors to store the activation * outputs for each layer * @param derivatives the list of allocated vectors to store the derivatives * of the activations */ private void feedForward(Vec input, List<Vec> activations, List<Vec> derivatives) { Vec x = input; for(int i = 0; i < Ws.size(); i++) { Matrix W_i = Ws.get(i); Vec b_i = bs.get(i); Vec a_i = activations.get(i); a_i.zeroOut(); W_i.multiply(x, 1, a_i); a_i.mutableAdd(b_i); a_i.applyFunction(f); Vec d_i = derivatives.get(i); a_i.copyTo(d_i); d_i.applyFunction(f.getD()); x = a_i; } } /** * Feeds an input through the network * @param 
inputthe input vector to feed in * @return the output vector for the given input at the final layer */ private Vec feedForward(Vec input) { Vec x = input; for(int i = 0; i < Ws.size(); i++) { Matrix W_i = Ws.get(i); Vec b_i = bs.get(i); Vec a_i = W_i.multiply(x); a_i.mutableAdd(b_i); a_i.applyFunction(f); x = a_i; } return x; } private void initializeWeights(Matrix W, Random rand) { for(int i = 0; i < W.rows(); i++) for(int j = 0; j < W.cols(); j++) W.set(i, j, weightInitialization.getWeight(W.cols(), W.rows(), initialLearningRate, rand)); } private void initializeWeights(Vec b, int inputSize, Random rand) { for(int i = 0; i < b.length(); i++) b.set(i, weightInitialization.getWeight(inputSize, b.length(), initialLearningRate, rand)); } @Override public List<Parameter> getParameters() { ArrayList<Parameter> params = new ArrayList<Parameter>(Parameter.getParamsFromMethods(this)); for(int i = 0; i < npl.length; i++) { final int ii = i; if(npl[ii] < 1) throw new ArithmeticException("There must be a poistive number of hidden neurons in each layer"); params.add(new IntParameter() { private static final long serialVersionUID = -827784019950722754L; @Override public int getValue() { return npl[ii]; } @Override public boolean setValue(int val) { if(val <= 0) return false; npl[ii] = val; return true; } @Override public String getASCIIName() { return "Neurons for Hidden Layer " + ii; } }); } params.add(new ObjectParameter<ActivationFunction>() { private static final long serialVersionUID = 6871130865935243583L; @Override public ActivationFunction getObject() { return getActivationFunction(); } @Override public boolean setObject(ActivationFunction obj) { setActivationFunction(obj); return true; } @Override public List parameterOptions() { return Arrays.asList(logitActiv, tanhActiv, softsignActiv); } @Override public String getASCIIName() { return "Activation Function"; } }); return Collections.unmodifiableList(params); } @Override public Parameter getParameter(String paramName) { return Parameter.toParameterMap(getParameters()).get(paramName); } }
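The setters above make up the whole training surface of this network, so a compact usage sketch may help tie them together. It is illustrative only: the varargs constructor taking the hidden-layer sizes (the npl array referenced above), the wrapper method, and the caller-supplied data set are assumptions of this example, not part of the original file.

//Illustrative sketch only; assumed imports: jsat.classifiers.*, jsat.math.decayrates.ExponetialDecay
public static CategoricalResults backPropExample(ClassificationDataSet trainSet)
{
    //assumption: a varargs constructor BackPropagationNet(int... npl) giving the hidden layer sizes
    BackPropagationNet net = new BackPropagationNet(100, 50);
    net.setActivationFunction(BackPropagationNet.softsignActiv);   //symmetric activation defined above
    net.setWeightInitialization(BackPropagationNet.WeightInitialization.TANH_NORMALIZED_INITIALIZATION);
    net.setInitialLearningRate(0.1);
    net.setLearningRateDecay(new ExponetialDecay());               //any DecayRate implementation works here
    net.setMomentum(0.5);
    net.setWeightDecay(1e-8);
    net.setBatchSize(32);
    net.setEpochs(200);
    net.train(trainSet);                                           //runs the backpropagation loop above
    return net.classify(trainSet.getDataPoint(0));                 //probabilities from the output layer
}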
29,829
29.976116
130
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/DReDNetSimple.java
package jsat.classifiers.neuralnetwork; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutorService; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.neuralnetwork.activations.ActivationLayer; import jsat.classifiers.neuralnetwork.activations.ReLU; import jsat.classifiers.neuralnetwork.activations.SoftmaxLayer; import jsat.classifiers.neuralnetwork.initializers.ConstantInit; import jsat.classifiers.neuralnetwork.initializers.GaussianNormalInit; import jsat.classifiers.neuralnetwork.regularizers.Max2NormRegularizer; import jsat.linear.SparseVector; import jsat.linear.Vec; import jsat.math.optimization.stochastic.AdaDelta; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.utils.IntList; import jsat.utils.ListUtils; import jsat.utils.concurrent.ParallelUtils; /** * This class provides a neural network based on Geoffrey Hinton's * <b>D</b>eep <b>Re</b>ctified <b>D</b>ropout <b>N</b>ets. It is parameterized * to be "simpler" in that the default batch size and gradient updating method * should require no tuning to get decent results<br> * <br> * NOTE: Training neural networks is computationally expensive, you may want to * consider a GPU implementation from another source. * * @author Edward Raff */ public class DReDNetSimple implements Classifier, Parameterized { private static final long serialVersionUID = -342281027279571332L; private SGDNetworkTrainer network; private int[] hiddenSizes; private int batchSize = 256; private int epochs = 100; /** * Creates a new DRedNet that uses two hidden layers with 1024 neurons each. * A batch size of 256 and 100 epochs will be used. */ public DReDNetSimple() { this(1024, 1024); } /** * Create a new DReDNet that uses the specified number of hidden layers. A * batch size of 256 and 100 epochs will be used. * @param hiddenLayerSizes the length indicates the number of hidden layers, * and the value in each index is the number of neurons in that layer */ public DReDNetSimple(int... hiddenLayerSizes) { setHiddenSizes(hiddenLayerSizes); } /** * Sets the hidden layer sizes for this network. The size of the array is * the number of hidden layers and the value in each index denotes the size * of that layer. 
* @param hiddenSizes */ public void setHiddenSizes(int[] hiddenSizes) { for(int i = 0; i < hiddenSizes.length; i++) if(hiddenSizes[i] <= 0) throw new IllegalArgumentException("Hidden layer " + i + " must contain a positive number of neurons, not " + hiddenSizes[i]); this.hiddenSizes = Arrays.copyOf(hiddenSizes, hiddenSizes.length); } /** * * @return the array of hidden layer sizes */ public int[] getHiddenSizes() { return hiddenSizes; } /** * Sets the batch size for updates * @param batchSize the number of items to compute the gradient from */ public void setBatchSize(int batchSize) { this.batchSize = batchSize; } /** * * @return the number of data points to use for one gradient computation */ public int getBatchSize() { return batchSize; } /** * Sets the number of epochs to perform * @param epochs the number of training iterations through the whole data * set */ public void setEpochs(int epochs) { if(epochs <= 0) throw new IllegalArgumentException("Number of epochs must be positive"); this.epochs = epochs; } /** * * @return the number of training iterations through the data set */ public int getEpochs() { return epochs; } @Override public CategoricalResults classify(DataPoint data) { Vec x = data.getNumericalValues(); Vec y = network.feedfoward(x); return new CategoricalResults(y.arrayCopy()); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { setup(dataSet); List<Vec> X = dataSet.getDataVectors(); List<Vec> Y = new ArrayList<Vec>(dataSet.size()); for(int i = 0; i < dataSet.size(); i++) { SparseVector sv = new SparseVector(dataSet.getClassSize(), 1); sv.set(dataSet.getDataPointCategory(i), 1.0); Y.add(sv); } IntList randOrder = new IntList(X.size()); ListUtils.addRange(randOrder, 0, X.size(), 1); List<Vec> Xmini = new ArrayList<>(batchSize); List<Vec> Ymini = new ArrayList<>(batchSize); ExecutorService threadPool = ParallelUtils.getNewExecutor(parallel); for(int epoch = 0; epoch < epochs; epoch++) { long start = System.currentTimeMillis(); double epochError = 0; Collections.shuffle(randOrder); for(int i = 0; i < X.size(); i+=batchSize) { int to = Math.min(i+batchSize, X.size()); Xmini.clear(); Ymini.clear(); for(int j = i; j < to; j++) { Xmini.add(X.get(j)); Ymini.add(Y.get(j)); } double localErr; if(parallel) localErr = network.updateMiniBatch(Xmini, Ymini, threadPool); else localErr = network.updateMiniBatch(Xmini, Ymini); epochError += localErr; } long end = System.currentTimeMillis(); // System.out.println("Epoch " + epoch + " had error " + epochError + " took " + (end-start)/1000.0 + " seconds"); } network.finishUpdating(); } private void setup(ClassificationDataSet dataSet) { network = new SGDNetworkTrainer(); int[] sizes = new int[hiddenSizes.length+2]; sizes[0] = dataSet.getNumNumericalVars(); for(int i = 0; i < hiddenSizes.length; i++) sizes[i+1] = hiddenSizes[i]; sizes[sizes.length-1] = dataSet.getClassSize(); network.setLayerSizes(sizes); List<ActivationLayer> activations = new ArrayList<>(hiddenSizes.length+2); for(int size : hiddenSizes) activations.add(new ReLU()); activations.add(new SoftmaxLayer()); network.setLayersActivation(activations); network.setRegularizer(new Max2NormRegularizer(25)); network.setWeightInit(new GaussianNormalInit(1e-2)); network.setBiasInit(new ConstantInit(0.1)); network.setEta(1.0); network.setGradientUpdater(new AdaDelta()); network.setup(); } @Override public boolean supportsWeightedData() { return false; } @Override public DReDNetSimple clone() { DReDNetSimple clone = new DReDNetSimple(hiddenSizes); if(this.network 
!= null) clone.network = this.network.clone(); clone.batchSize = this.batchSize; clone.epochs = this.epochs; return clone; } }
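A minimal usage sketch for the class above (illustrative only; the wrapper method and the caller-supplied ClassificationDataSet are assumptions of this example, while every call shown is defined in this file):

//Illustrative sketch only; assumed import: jsat.classifiers.*
public static CategoricalResults dredNetExample(ClassificationDataSet data)
{
    DReDNetSimple net = new DReDNetSimple(512, 512); //two ReLU hidden layers of 512 neurons each
    net.setBatchSize(128);                           //data points used per gradient estimate
    net.setEpochs(50);                               //passes over the whole data set
    net.train(data, true);                           //true requests parallel mini-batch updates
    return net.classify(data.getDataPoint(0));       //softmax output interpreted as class probabilities
}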
7,462
31.168103
142
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/LVQ.java
package jsat.classifiers.neuralnetwork; import static java.lang.Math.max; import static java.lang.Math.min; import java.util.*; import jsat.classifiers.*; import jsat.clustering.SeedSelectionMethods; import jsat.clustering.SeedSelectionMethods.SeedSelection; import jsat.linear.Vec; import jsat.linear.VecPaired; import jsat.linear.distancemetrics.DistanceMetric; import jsat.linear.distancemetrics.TrainableDistanceMetric; import jsat.linear.vectorcollection.DefaultVectorCollection; import jsat.linear.vectorcollection.VectorCollection; import jsat.math.decayrates.*; import jsat.parameters.*; import jsat.utils.random.RandomUtil; /** * Learning Vector Quantization (LVQ) is an algorithm that extends {@link SOM} * to take advantage of label information to perform classification. It creates * a number of representatives, or learning vectors, for each class. The LVs are * then updated iteratively to push away from the wrong class and pull closer to * the correct class. LVQ is equivalent to a type of 2 layer neural network. * * @author Edward Raff */ public class LVQ implements Classifier, Parameterized { private static final long serialVersionUID = -3911765006048793222L; /** * The default number of iterations is {@value #DEFAULT_ITERATIONS} */ public static final int DEFAULT_ITERATIONS = 200; /** * The default learning rate {@value #DEFAULT_LEARNING_RATE} */ public static final double DEFAULT_LEARNING_RATE = 0.1; /** * The default eps distance factor between the two wining vectors * {@value #DEFAULT_EPS} */ public static final double DEFAULT_EPS = 0.3; /** * The default scaling factor for the {@link LVQVersion#LVQ3} case is * {@value #DEFAULT_MSCALE} */ public static final double DEFAULT_MSCALE = (0.5-0.1)/2+0.1; /** * The default method of LVQ to use LVQ3 */ public static final LVQVersion DEFAULT_LVQ_METHOD = LVQVersion.LVQ3; /** * The default number of representatives per class is * {@value #DEFAULT_REPS_PER_CLASS} */ public static final int DEFAULT_REPS_PER_CLASS = 3; /** * The default stopping distance for convergence is * {@value #DEFAULT_STOPPING_DIST} */ public static final double DEFAULT_STOPPING_DIST = 1e-3; /** * The default seed selection method is SeedSelection.KPP */ public static final SeedSelection DEFAULT_SEED_SELECTION= SeedSelection.KPP; private DecayRate learningDecay; private int iterations; private double learningRate; /** * The distance metric to use */ protected DistanceMetric dm; private LVQVersion lvqVersion; private double eps; private double mScale; private double stoppingDist; private int representativesPerClass; /** * Array containing the learning vectors */ protected Vec[] weights; /** * Array of the class that each learning vector represents */ protected int[] weightClass; /** * Records the number of times each neuron won and was off the correct class * during training. 
Neurons that end with a count of zero wins will be ignored */ protected int[] wins; private SeedSelectionMethods.SeedSelection seedSelection; /** * Contains the Learning vectors paired with their index in the weights array */ protected VectorCollection<VecPaired<Vec, Integer>> vc; /** * Creates a new LVQ instance * @param dm the distance metric to use * @param iterations the number of iterations to perform */ public LVQ(DistanceMetric dm, int iterations) { this(dm, iterations, DEFAULT_LEARNING_RATE, DEFAULT_REPS_PER_CLASS); } /** * Creates a new LVQ instance * @param dm the distance metric to use * @param iterations the number of iterations to perform * @param learningRate the learning rate to use when updating * @param representativesPerClass the number of representatives to create * for each class */ public LVQ(DistanceMetric dm, int iterations, double learningRate, int representativesPerClass) { this(dm, iterations, learningRate, representativesPerClass, DEFAULT_LVQ_METHOD, new ExponetialDecay()); } /** * Creates a new LVQ instance * @param dm the distance metric to use * @param iterations the number of iterations to perform * @param learningRate the learning rate to use when updating * @param representativesPerClass the number of representatives to create * for each class * @param lvqVersion the version of LVQ to use * @param learningDecay the amount of decay to apply to the learning rate */ public LVQ(DistanceMetric dm, int iterations, double learningRate, int representativesPerClass, LVQVersion lvqVersion, DecayRate learningDecay) { setLearningDecay(learningDecay); setIterations(iterations); setLearningRate(learningRate); setDistanceMetric(dm); setLVQMethod(lvqVersion); setEpsilonDistance(DEFAULT_EPS); setMScale(DEFAULT_MSCALE); setSeedSelection(DEFAULT_SEED_SELECTION); setVecCollection(new DefaultVectorCollection<>()); setRepresentativesPerClass(representativesPerClass); } /** * Copy Constructor * @param toCopy version to copy */ protected LVQ(LVQ toCopy) { this(toCopy.dm.clone(), toCopy.iterations, toCopy.learningRate, toCopy.representativesPerClass, toCopy.lvqVersion, toCopy.learningDecay); if(toCopy.weights != null) { wins = Arrays.copyOf(toCopy.wins, toCopy.wins.length); weights = new Vec[toCopy.weights.length]; weightClass = Arrays.copyOf(toCopy.weightClass, toCopy.weightClass.length); for(int i = 0; i < toCopy.weights.length; i++) this.weights[i] = toCopy.weights[i].clone(); } setEpsilonDistance(toCopy.eps); setMScale(toCopy.getMScale()); setSeedSelection(toCopy.getSeedSelection()); if(toCopy.vc != null) this.vc = toCopy.vc.clone(); setVecCollection(toCopy.vc.clone()); } /** * When using {@link LVQVersion#LVQ3}, a 3rd case exists where up to two * learning vectors can be updated at the same time if they have the same * class. To avoid over fitting, an additional regularizing weight is placed * upon the learning rate for their update. THis sets the additional * multiplied. It is suggested to use a value in the range of [0.1, 0.5] * * @param mScale the multiplication factor to apply to the learning vectors */ public void setMScale(double mScale) { if(mScale <= 0 || Double.isInfinite(mScale) || Double.isNaN(mScale)) throw new ArithmeticException("Scale factor must be a positive constant, not " + mScale); this.mScale = mScale; } /** * Returns the scale used for the LVQ 3 learning algorithm update set. 
* @return a scale used during LVQ3 */ public double getMScale() { return mScale; } /** * Sets the epsilon multiplier that controls the maximum distance two * learning vectors can be from each other in order to be updated at the * same time. If they are too far apart, only one can be updated. It is * recommended to use a value in the range [0.1, 0.3] * * @param eps the scale factor of the maximum distance for two learning * vectors to be updated at the same time */ public void setEpsilonDistance(double eps) { if(eps <= 0 || Double.isInfinite(eps) || Double.isNaN(eps)) throw new ArithmeticException("eps factor must be a positive constant, not " + eps); this.eps = eps; } /** * Sets the epsilon scale distance between learning vectors that may be * allowed to two at a time. * * @return the scale of the allowable distance between learning vectors when * updating */ public double getEpsilonDistance() { return eps; } /** * Sets the learning rate of the algorithm. It should be set in accordance * with {@link #setLearningDecay(jsat.math.decayrates.DecayRate) }. * * @param learningRate the learning rate to use */ public void setLearningRate(double learningRate) { if(learningRate <= 0 || Double.isInfinite(learningRate) || Double.isNaN(learningRate)) throw new ArithmeticException("learning rate must be a positive constant, not " + learningRate); this.learningRate = learningRate; } /** * Returns the learning rate at which to apply updates during the algorithm. * * @return the learning rate to use */ public double getLearningRate() { return learningRate; } /** * Sets the decay rate to apply to the learning rate. * * @param learningDecay the rate to decay the learning rate */ public void setLearningDecay(DecayRate learningDecay) { this.learningDecay = learningDecay; } /** * Returns the method used to decay the learning rate over each iteration * @return the decay rate used at each iteration */ public DecayRate getLearningDecay() { return learningDecay; } /** * Sets the number of learning iterations that will occur. * * @param iterations the number of iterations for the algorithm to use */ public void setIterations(int iterations) { if(iterations < 0) throw new ArithmeticException("Can not perform a negative number of iterations"); this.iterations = iterations; } /** * Returns the number of iterations of the algorithm to apply * @return the number of iterations to perform */ public int getIterations() { return iterations; } /** * Sets the number of representatives to create for each class. It is * possible to have an unbalanced number of representatives per class, but * that is not currently supported. Increasing the number of representatives * per class increases the complexity of the decision boundary that can be * learned. * * @param representativesPerClass the number of representatives to create * for each class */ public void setRepresentativesPerClass(int representativesPerClass) { this.representativesPerClass = representativesPerClass; } /** * Returns the number of representatives to create for each class. * @return the number of representatives to create for each class. */ public int getRepresentativesPerClass() { return representativesPerClass; } /** * Sets the version of LVQ used. * * @param lvqMethod the version of LVQ to use */ public void setLVQMethod(LVQVersion lvqMethod) { this.lvqVersion = lvqMethod; } /** * Returns the version of the LVQ algorithm to use. * @return the version of the LVQ algorithm to use. 
*/ public LVQVersion getLVQMethod() { return lvqVersion; } /** * Sets the distance used for learning * @param dm the distance metric to use */ public void setDistanceMetric(DistanceMetric dm) { this.dm = dm; } /** * Returns the distance metric to use * @return the distance metric to use */ public DistanceMetric getDistanceMetric() { return dm; } /** * The algorithm terminates early if the learning vectors are only moving * small distances. The stopping distance is the minimum distance that one * of the learning vectors must move for the algorithm to continue. * * @param stoppingDist the minimum distance for each learning vector to move */ public void setStoppingDist(double stoppingDist) { if(stoppingDist < 0 || Double.isInfinite(stoppingDist) || Double.isNaN(stoppingDist)) throw new ArithmeticException("stopping dist must be a zero or positive constant, not " + stoppingDist); this.stoppingDist = stoppingDist; } /** * Returns the stopping distance used to terminate the algorithm early * @return the stopping distance used toe nd the algorithm early */ public double getStoppingDist() { return stoppingDist; } /** * Sets the seed selection method used to select the initial learning vectors * @param seedSelection the method of initialing LVQ */ public void setSeedSelection(SeedSelection seedSelection) { this.seedSelection = seedSelection; } /** * Returns the method of seed selection used * @return the method of seed selection used */ public SeedSelection getSeedSelection() { return seedSelection; } /** * There are several LVQ versions, each one adding an additional case in * which two LVs instead of one can be updated. */ public enum LVQVersion { /** * LVQ1 will only update one LV */ LVQ1, /** * Two vectors will be updated if they are close enough together. The * closest was the wrong class but the 2nd closet was the correct class. */ LVQ2, /** * Two vectors will be updated if they are close enough together and do * not belong to the same class if one of them was the correct class to * a training vector. */ LVQ21, /** * Two vectors will be updated if they are close enough together and are * of the same class as the training vector. */ LVQ3 } /** * Sets the vector collection factory to use when storing the final learning vectors * @param vcf the vector collection factory to use */ public void setVecCollection(VectorCollection<VecPaired<Vec, Integer>> vcf) { this.vc = vcf; } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(weightClass.length/representativesPerClass); int index = vc.search(data.getNumericalValues(), 1).get(0).getVector().getPair(); cr.setProb(weightClass[index], 1.0); return cr; } /** * Returns true if the two distance values are within an acceptable epsilon * ratio of each other. 
* @param minDist the first distance * @param minDist2 the second distance * @return <tt>true</tt> if the are acceptable close */ protected boolean epsClose(double minDist, double minDist2) { return min(minDist/minDist2, minDist2/minDist) > (1 - eps) && max(minDist/minDist2, minDist2/minDist) < (1 + eps); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { TrainableDistanceMetric.trainIfNeeded(dm, dataSet, parallel); Random rand = RandomUtil.getRandom(); int classCount = dataSet.getPredicting().getNumOfCategories(); weights = new Vec[classCount*representativesPerClass]; Vec[] weightsPrev = new Vec[weights.length]; weightClass = new int[weights.length]; wins = new int[weights.length]; //Generate weights that are hopefully close to their final positions int curClass = 0; int curPos = 0; while(curClass < classCount) { List<DataPoint> origSubList = dataSet.getSamples(curClass); List<DataPointPair<Integer>> subList = new ArrayList<>(origSubList.size()); for(DataPoint dp : origSubList) subList.add(new DataPointPair<>(dp, curClass)); ClassificationDataSet subSet = new ClassificationDataSet(subList, dataSet.getPredicting()); List<Vec> classSeeds = SeedSelectionMethods.selectIntialPoints(subSet, representativesPerClass, dm, rand, seedSelection); for(Vec v : classSeeds) { weights[curPos] = v.clone(); weightsPrev[curPos] = weights[curPos].clone(); weightClass[curPos++] = curClass; } curClass++; } Vec tmp = weights[0].clone(); for(int iteration = 0; iteration < iterations; iteration++) { for(int j = 0; j < weights.length; j++) weights[j].copyTo(weightsPrev[j]); Arrays.fill(wins, 0); double alpha = learningDecay.rate(iteration, iterations, learningRate); for(int i = 0; i < dataSet.size(); i++) { Vec x = dataSet.getDataPoint(i).getNumericalValues(); int closestClass = -1; int minDistIndx = 0, minDistIndx2 = 0; double minDist = Double.POSITIVE_INFINITY, minDist2 = Double.POSITIVE_INFINITY; for(int j = 0; j < weights.length; j++) { double dist = dm.dist(x, weights[j]); if(dist < minDist) { if(lvqVersion == LVQVersion.LVQ2) { minDist2 = minDist; minDistIndx2 = minDistIndx; } minDist = dist; minDistIndx = j; closestClass = dataSet.getDataPointCategory(i); } } if (lvqVersion.ordinal() >= LVQVersion.LVQ2.ordinal() && weightClass[minDistIndx] != weightClass[minDistIndx2] && closestClass == weightClass[minDistIndx2] && epsClose(minDist, minDist2)) {//Update both vectors //Move the closest farther away x.copyTo(tmp); tmp.mutableSubtract(weights[minDistIndx]); weights[minDistIndx].mutableSubtract(alpha, tmp); //And the 2nd closest closer x.copyTo(tmp); tmp.mutableSubtract(weights[minDistIndx2]); weights[minDistIndx2].mutableAdd(alpha, tmp); wins[minDistIndx2]++; } else if (lvqVersion.ordinal() >= LVQVersion.LVQ21.ordinal() && weightClass[minDistIndx] != weightClass[minDistIndx2] && closestClass == weightClass[minDistIndx] && epsClose(minDist, minDist2)) {//Update both vectors //Move the closest closer x.copyTo(tmp); tmp.mutableSubtract(weights[minDistIndx]); weights[minDistIndx].mutableAdd(alpha, tmp); wins[minDistIndx]++; //And the 2nd closest farther away x.copyTo(tmp); tmp.mutableSubtract(weights[minDistIndx2]); weights[minDistIndx2].mutableSubtract(alpha, tmp); } else if (lvqVersion.ordinal() >= LVQVersion.LVQ3.ordinal() && weightClass[minDistIndx] == weightClass[minDistIndx2] && min(minDist/minDist2, minDist2/minDist) > (1-eps)*(1+eps)) {//Update both vectors in the same direction x.copyTo(tmp); tmp.mutableSubtract(weights[minDistIndx]); 
weights[minDistIndx].mutableAdd(mScale*alpha, tmp); x.copyTo(tmp); tmp.mutableSubtract(weights[minDistIndx2]); weights[minDistIndx2].mutableAdd(mScale*alpha, tmp); wins[minDistIndx]++; wins[minDistIndx2]++; } else //Base case, can only update one vector { x.copyTo(tmp); tmp.mutableSubtract(weights[minDistIndx]); if(closestClass == weightClass[minDistIndx])//Move closer to the right class { wins[minDistIndx]++; weights[minDistIndx].mutableAdd(alpha, tmp); } else//Move farther away { weights[minDistIndx].mutableSubtract(alpha, tmp); } } } //Check for early convergence boolean stopEarly = true; for(int j = 0; j < weights.length; j++) if(stopEarly && dm.dist(weights[j], weightsPrev[j]) > stoppingDist) stopEarly = false; if(stopEarly) break; } List<VecPaired<Vec, Integer>> finalLVs = new ArrayList<VecPaired<Vec, Integer>>(weights.length); for(int i = 0; i < weights.length; i++) if(wins[i] == 0) continue; else finalLVs.add(new VecPaired<Vec, Integer>(weights[i], i)); vc.build(parallel, finalLVs, dm); } @Override public boolean supportsWeightedData() { return false; } @Override public LVQ clone() { return new LVQ(this); } }
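For completeness, a short illustrative setup of the LVQ class above. The wrapper method and the data set are assumptions of this sketch; the constructor and setters used are the ones defined in this file.

//Illustrative sketch only; assumed imports: jsat.classifiers.*, jsat.linear.distancemetrics.EuclideanDistance, jsat.math.decayrates.ExponetialDecay
public static CategoricalResults lvqExample(ClassificationDataSet data)
{
    LVQ lvq = new LVQ(new EuclideanDistance(), 200,    //distance metric, training iterations
                      0.1, 5,                          //learning rate, representatives per class
                      LVQ.LVQVersion.LVQ3, new ExponetialDecay());
    lvq.setEpsilonDistance(0.3);  //window in which two prototypes may be updated together
    lvq.setMScale(0.3);           //damping applied to the LVQ3 same-class double update
    lvq.train(data, false);
    return lvq.classify(data.getDataPoint(0));
}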
21,824
34.258481
116
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/LVQLLC.java
package jsat.classifiers.neuralnetwork; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ExecutorService; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.DataPointPair; import jsat.classifiers.PriorClassifier; import jsat.classifiers.bayesian.MultivariateNormals; import jsat.linear.Vec; import jsat.linear.VecPaired; import jsat.linear.distancemetrics.DistanceMetric; import jsat.math.decayrates.DecayRate; /** * LVQ with Locally Learned Classifier (LVQ-LLC) is an adaption of the LVQ algorithm * I have come up with. Given a classification data set, LVQ develops and moves * several prototype vectors throughout the space, trying to place them as good * representatives. Classification is then done Nearest Neighbor style among the * prototypes. <br> * LVQ-LLC trains a local classifier on all of the data points that belong to * the prototype, and the data points that lie across the border but are still * near the prototype using the {@link #getEpsilonDistance() } parameter that is used to * update two prototypes at the same time. Classification can then be done by * getting the Classifier for the nearest prototype, or averaging the results of * the two closest prototypes if the point is near a boundary. <br> * This is done because, given a complex decision boundary and a sufficient * number of prototypes, each prototype's domain will be a smaller subset of the * problem and will hopefully resemble a simpler decision problem that can be * solved by a less complicated local learner. <br> * LVQ-LLC has the following advantages over LVQ: * <ul> * <li>Can return probabilities instead of hard classifications</li> * <li>Approximate decision boundaries can be more complicated than voronoi diagrams</li> * <li>Increase accuracy given a smaller number of prototypes per class</li> * </ul> * <br> * By default, the local classifier is the {@link MultivariateNormals}. * * @author Edward Raff */ public class LVQLLC extends LVQ { private static final long serialVersionUID = 3602640001545233744L; private Classifier localClassifier; private Classifier[] localClassifeirs; /** * Creates a new LVQ-LLC instance that uses {@link MultivariateNormals} as * the local classifier. 
* @param dm the distance metric to use * @param iterations the number of iterations to perform */ public LVQLLC(DistanceMetric dm, int iterations) { this(dm, iterations, new MultivariateNormals(true)); } /** * Creates a new LVQ-LLC instance * @param dm the distance metric to use * @param iterations the number of iterations to perform * @param localClasifier the classifier to use as a local classifier for each prototype */ public LVQLLC(DistanceMetric dm, int iterations, Classifier localClasifier) { super(dm, iterations); setLocalClassifier(localClasifier); } /** * Creates a new LVQ-LLC instance * @param dm the distance metric to use * @param iterations the number of iterations to perform * @param localClasifier the classifier to use as a local classifier for each prototype * @param learningRate the learning rate to use when updating * @param representativesPerClass the number of representatives to create * for each class */ public LVQLLC(DistanceMetric dm, int iterations, Classifier localClasifier, double learningRate, int representativesPerClass) { super(dm, iterations, learningRate, representativesPerClass); setLocalClassifier(localClasifier); } /** * Creates a new LVQ-LLC instance * @param dm the distance metric to use * @param iterations the number of iterations to perform * @param localClasifier the classifier to use as a local classifier for each prototype * @param learningRate the learning rate to use when updating * @param representativesPerClass the number of representatives to create * for each class * @param lvqVersion the version of LVQ to use * @param learningDecay the amount of decay to apply to the learning rate */ public LVQLLC(DistanceMetric dm, int iterations, Classifier localClasifier, double learningRate, int representativesPerClass, LVQVersion lvqVersion, DecayRate learningDecay) { super(dm, iterations, learningRate, representativesPerClass, lvqVersion, learningDecay); setLocalClassifier(localClasifier); } protected LVQLLC(LVQLLC toCopy) { super(toCopy); if(toCopy.localClassifier != null) this.localClassifier = toCopy.localClassifier.clone(); if(toCopy.localClassifeirs != null) { this.localClassifeirs = new Classifier[toCopy.localClassifeirs.length]; for(int i = 0; i < this.localClassifeirs.length; i++) this.localClassifeirs[i] = toCopy.localClassifeirs[i].clone(); } } /** * Each prototype will create a classifier that is local to itself, and * trained on the points that belong to the prototype and those near the * border of the prototype. This sets the classifier that will be used * * @param localClassifier the local classifier to use for each prototype */ public void setLocalClassifier(Classifier localClassifier) { this.localClassifier = localClassifier; } /** * Returns the classifier used for each prototype * @return the classifier used for each prototype */ public Classifier getLocalClassifier() { return localClassifier; } @Override public CategoricalResults classify(DataPoint data) { List<? 
extends VecPaired<VecPaired<Vec, Integer>, Double>> nns = vc.search(data.getNumericalValues(), 2); double d1 = nns.get(0).getPair(); int index1 = nns.get(0).getVector().getPair(); double d2 = nns.get(1).getPair(); int index2 = nns.get(1).getVector().getPair(); CategoricalResults r1 = localClassifeirs[index1].classify(data); if(getLVQMethod().ordinal() >= LVQVersion.LVQ2.ordinal() && epsClose(d1, d2)) { CategoricalResults result = new CategoricalResults(r1.size()); CategoricalResults r2 = localClassifeirs[index2].classify(data); double distSum = d1+d2; for(int i = 0; i < r1.size(); i++) { result.incProb(i, r1.getProb(i)*(distSum-d1)); result.incProb(i, r2.getProb(i)*(distSum-d2)); } result.normalize(); return result; } else return r1; }
@Override public void train(ClassificationDataSet dataSet, boolean parallel) { super.train(dataSet, parallel); List<List<DataPointPair<Integer>>> listOfLocalPoints = new ArrayList<>(weights.length); for (int i = 0; i < weights.length; i++) listOfLocalPoints.add(new ArrayList<>(wins[i] * 3 / 2)); for (DataPointPair<Integer> dpp : dataSet.getAsDPPList()) { Vec x = dpp.getVector(); int minDistIndx = 0, minDistIndx2 = 0; double minDist = Double.POSITIVE_INFINITY, minDist2 = Double.POSITIVE_INFINITY; List<? extends VecPaired<VecPaired<Vec, Integer>, Double>> closestWeightVecs = vc.search(x, 2); VecPaired<VecPaired<Vec, Integer>, Double> closest = closestWeightVecs.get(0); minDistIndx = closest.getVector().getPair(); minDist = closest.getPair(); VecPaired<VecPaired<Vec, Integer>, Double> closest2nd = closestWeightVecs.get(1); minDistIndx2 = closest2nd.getVector().getPair(); minDist2 = closest2nd.getPair(); listOfLocalPoints.get(minDistIndx).add(dpp); double tmpEps = getEpsilonDistance(); if(Math.min(minDist/minDist2, minDist2/minDist) > (1 - tmpEps) && Math.max(minDist/minDist2, minDist2/minDist) < (1 + tmpEps)) { listOfLocalPoints.get(minDistIndx2).add(dpp); } } localClassifeirs = new Classifier[weights.length]; for(int i = 0; i < weights.length; i++) { if(wins[i] == 0) continue; ClassificationDataSet localSet = new ClassificationDataSet(listOfLocalPoints.get(i), dataSet.getPredicting()); if(wins[i] < 10) { CategoricalResults cr = new CategoricalResults(dataSet.getPredicting().getNumOfCategories()); cr.setProb(weightClass[i], 1.0); localClassifeirs[i] = new PriorClassifier(cr); } else { localClassifeirs[i] = localClassifier.clone(); localClassifeirs[i].train(localSet); } } }
@Override public LVQLLC clone() { return new LVQLLC(this); } }
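An analogous illustrative sketch for LVQ-LLC, using the five-argument constructor defined above (the wrapper method and data set are assumptions of this example):

//Illustrative sketch only; assumed imports: jsat.classifiers.*, jsat.classifiers.bayesian.MultivariateNormals, jsat.linear.distancemetrics.EuclideanDistance
public static CategoricalResults lvqllcExample(ClassificationDataSet data)
{
    LVQLLC lvqllc = new LVQLLC(new EuclideanDistance(), 200,
                               new MultivariateNormals(true), //local classifier trained per prototype
                               0.1, 3);                       //learning rate, representatives per class
    lvqllc.train(data, false);
    return lvqllc.classify(data.getDataPoint(0)); //blends the two nearest local models near a boundary
}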
9,254
38.892241
177
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/Perceptron.java
package jsat.classifiers.neuralnetwork;

import jsat.SingleWeightVectorModel;
import jsat.classifiers.BaseUpdateableClassifier;
import jsat.classifiers.CategoricalData;
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.DataPoint;
import jsat.classifiers.calibration.BinaryScoreClassifier;
import jsat.exceptions.FailedToFitException;
import jsat.linear.DenseVector;
import jsat.linear.Vec;

/**
 * The perceptron is a simple algorithm that attempts to find a hyperplane that
 * separates two classes. It may find any possible separating plane, and there
 * are no guarantees when the data is not linearly separable.
 * <br>
 * It is equivalent to a single node Neural Network, and is related to SVMs
 *
 * @author Edward Raff
 */
public class Perceptron extends BaseUpdateableClassifier implements BinaryScoreClassifier, SingleWeightVectorModel
{
    private static final long serialVersionUID = -3605237847981632020L;
    private double learningRate;
    private double bias;
    private Vec weights;

    /**
     * Creates a new Perceptron learner
     */
    public Perceptron()
    {
        this(0.1, 20);
    }

    /**
     * Creates a new Perceptron learner
     *
     * @param learningRate the rate at which to incorporate the change of errors
     * into the model
     * @param iterationLimit the maximum number of iterations to perform when converging
     */
    public Perceptron(double learningRate, int iterationLimit)
    {
        if(learningRate <= 0 || learningRate > 1)
            throw new RuntimeException("Perceptron learning rate must be in the range (0,1]");
        this.learningRate = learningRate;
        setEpochs(iterationLimit);
    }

    @Override
    public CategoricalResults classify(DataPoint data)
    {
        CategoricalResults cr = new CategoricalResults(2);
        cr.setProb(output(data), 1);
        return cr;
    }

    @Override
    public double getScore(DataPoint dp)
    {
        return weights.dot(dp.getNumericalValues()) + bias;
    }

    @Override
    public void setUp(CategoricalData[] categoricalAttributes, int numericAttributes, CategoricalData predicting)
    {
        if(predicting.getNumOfCategories() != 2)
            throw new FailedToFitException("Perceptron is for binary problems only");
        weights = new DenseVector(numericAttributes);
        bias = 0;
    }

    @Override
    public void update(DataPoint dataPoint, double weight, int targetClass)
    {
        if(classify(dataPoint).mostLikely() == targetClass)
            return;//nothing to do
        //else, error
        double c = (targetClass*2-1)*learningRate;
        weights.mutableAdd(c, dataPoint.getNumericalValues());
        bias += c;
    }

    private int output(DataPoint input)
    {
        double dot = getScore(input);
        return (dot >= 0) ? 1 : 0;
    }

    @Override
    public boolean supportsWeightedData()
    {
        return true;
    }

    @Override
    public Vec getRawWeight()
    {
        return weights;
    }

    @Override
    public double getBias()
    {
        return bias;
    }

    @Override
    public Vec getRawWeight(int index)
    {
        if(index < 1)
            return getRawWeight();
        else
            throw new IndexOutOfBoundsException("Model has only 1 weight vector");
    }

    @Override
    public double getBias(int index)
    {
        if (index < 1)
            return getBias();
        else
            throw new IndexOutOfBoundsException("Model has only 1 weight vector");
    }

    @Override
    public int numWeightsVecs()
    {
        return 1;
    }

    @Override
    public Perceptron clone()
    {
        Perceptron copy = new Perceptron(learningRate, epochs);
        if(this.weights != null)
            copy.weights = this.weights.clone();
        copy.bias = this.bias;
        return copy;
    }
}
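A small illustrative sketch of the class above. The batch train(ClassificationDataSet) call is assumed to be supplied by BaseUpdateableClassifier (it is not defined in this file), and the two-class data set is provided by the caller.

//Illustrative sketch only; assumed import: jsat.classifiers.*
public static CategoricalResults perceptronExample(ClassificationDataSet data)
{
    Perceptron perc = new Perceptron(0.5, 100);          //learning rate in (0,1], up to 100 epochs
    perc.train(data);                                    //assumed inherited batch wrapper around update(...)
    double score = perc.getScore(data.getDataPoint(0));  //signed score; values >= 0 map to class 1
    return perc.classify(data.getDataPoint(0));
}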
3,831
24.546667
114
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/RBFNet.java
package jsat.classifiers.neuralnetwork; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicIntegerArray; import jsat.DataSet; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.svm.DCDs; import jsat.clustering.kmeans.HamerlyKMeans; import jsat.clustering.SeedSelectionMethods; import jsat.datatransform.DataTransform; import jsat.distributions.Distribution; import jsat.distributions.Uniform; import jsat.distributions.discrete.UniformDiscrete; import jsat.exceptions.FailedToFitException; import jsat.linear.DenseVector; import jsat.linear.SparseVector; import jsat.linear.Vec; import jsat.linear.distancemetrics.DistanceMetric; import jsat.linear.distancemetrics.EuclideanDistance; import jsat.linear.distancemetrics.MahalanobisDistance; import jsat.math.OnLineStatistics; import jsat.parameters.Parameterized; import jsat.regression.RegressionDataSet; import jsat.regression.Regressor; import jsat.utils.BoundedSortedList; import jsat.utils.DoubleList; import jsat.utils.IntSet; import jsat.utils.concurrent.ParallelUtils; import jsat.utils.random.RandomUtil; /** * This provides a highly configurable implementation of a Radial Basis Function * Neural Network. A RBF network is a type of neural network that contains one * hidden layer, and is related to the {@link LVQ} algorithm. In a classical RBF * Network, the distance between two data points is generally the * {@link EuclideanDistance} or {@link MahalanobisDistance}. This implementation * allows the specification of any arbitrary distance metric. <br> * <br> * Another restriction on classical RBF Nets is that a weighted sum of the * output of the hidden units be used to make the final decision. Instead this * implementation allows the specification of an arbitrary Classifier or * Regressor to estimate the outputs based on the hidden unit activations. * Whether or not the predictor supports Classification, Regression, and what * classification features it supports - will determine what the RBF Network * supports. This allows for models technically more complicated and powerful * than the standard RBF network. <br> * <br> * The initial phases of a RBF Network is to learn the neuron locations and * activations. This part can also be seen as learning a data transformation. As * such, the RBF Network can be used as a DataTransform itself. <br> * The last phase of the network is to learn the model based on the data point * activations. <br> * <br> * It is highly recommended to use a base learning method that can efficiently * use sparse vectors. * * @author Edward Raff */ public class RBFNet implements Classifier, Regressor, DataTransform, Parameterized { private static final long serialVersionUID = 5418896646203518062L; private int numCentroids; private Phase1Learner p1l; private Phase2Learner p2l; private double alpha; private int p; private DistanceMetric dm; private boolean normalize = true; private Classifier baseClassifier; private Regressor baseRegressor; private List<Double> centroidDistCache; private List<Vec> centroids; private double[] bandwidths; /** * Creates a new RBF Network suitable for binary classification or * regression and uses 100 hidden nodes. 
One of the other constructors * should be used if you need classification for multi-class or if you need * probability outputs. <br> * <br> * This will use {@link Phase1Learner#K_MEANS} for neuron selection and * {@link Phase2Learner#NEAREST_OTHER_CENTROID_AVERAGE} for activation * tuning. The {@link EuclideanDistance} will be use as the metric. * */ public RBFNet() { this(100); } /** * Creates a new RBF Network suitable for binary classification or * regression. One of the other constructors should be used if you need * classification for multi-class or if you need probability outputs. <br> * <br> * This will use {@link Phase1Learner#K_MEANS} for neuron selection and * {@link Phase2Learner#NEAREST_OTHER_CENTROID_AVERAGE} for activation tuning. * The {@link EuclideanDistance} will be use as the metric. * * @param numCentroids the number of centroids or neurons to use in the * network's hidden layer */ public RBFNet(int numCentroids) { this(numCentroids, Phase1Learner.K_MEANS, Phase2Learner.NEAREST_OTHER_CENTROID_AVERAGE, 3, 3, new EuclideanDistance(), (Classifier) new DCDs()); } /** * Creates a new RBF Network for classification tasks. If the classifier can * also perform regression, then the network will be able to perform both. * * @param numCentroids the number of centroids or neurons to use in the * network's hidden layer * @param cl the method to learn the neuron locations * @param bl the method to learn the neuron activations * @param alpha a parameter that may have an effect on the neuron activation * learning method. * @param p a parameter that may have an effect on the neuron activation * learning method * @param dm the distance metric to use * @param baseClassifier the base classifier to learn on top of the hidden * layer activations. */ public RBFNet(int numCentroids, Phase1Learner cl, Phase2Learner bl, double alpha, int p, DistanceMetric dm, Classifier baseClassifier) { setNumCentroids(numCentroids); setPhase1Learner(cl); setPhase2Learner(bl); setAlpha(alpha); setP(p); setDistanceMetric(dm); this.baseClassifier = baseClassifier; if(baseClassifier instanceof Regressor) baseRegressor = (Regressor) baseClassifier; } /** * Creates a new RBF Network for regression tasks. If the regressor can * also perform classification, then the network will be able to perform * both. * * @param numCentroids the number of centroids or neurons to use in the * network's hidden layer * @param cl the method to learn the neuron locations * @param bl the method to learn the neuron activations * @param alpha a parameter that may have an effect on the neuron activation * learning method. * @param p a parameter that may have an effect on the neuron activation * learning method * @param dm the distance metric to use * @param baseRegressor the base regressor to learn on op of the hidden * layer activations. 
*/ public RBFNet(int numCentroids, Phase1Learner cl, Phase2Learner bl, double alpha, int p, DistanceMetric dm, Regressor baseRegressor) { setNumCentroids(numCentroids); setPhase1Learner(cl); setPhase2Learner(bl); setAlpha(alpha); setP(p); setDistanceMetric(dm); this.baseRegressor = baseRegressor; if(baseRegressor instanceof Classifier) baseClassifier = (Classifier) baseRegressor; } /** * Copy constructor * @param toCopy the network to copy */ public RBFNet(RBFNet toCopy) { setNumCentroids(toCopy.getNumCentroids()); setPhase1Learner(toCopy.getPhase1Learner()); setPhase2Learner(toCopy.getPhase2Learner()); setAlpha(toCopy.getAlpha()); setP(toCopy.getP()); setDistanceMetric(toCopy.getDistanceMetric().clone()); if(toCopy.baseRegressor != null) { this.baseRegressor = toCopy.baseRegressor.clone(); if(baseRegressor instanceof Classifier) baseClassifier = (Classifier) baseRegressor; } else if(toCopy.baseClassifier != null) { this.baseClassifier = toCopy.baseClassifier.clone(); if(baseClassifier instanceof Regressor) baseRegressor = (Regressor) baseClassifier; } if(toCopy.centroids != null) { this.centroids = new ArrayList<Vec>(toCopy.centroids.size()); for(Vec v : toCopy.centroids) this.centroids.add(v.clone()); if(toCopy.centroidDistCache != null) this.centroidDistCache = new DoubleList(toCopy.centroidDistCache); } if(toCopy.bandwidths != null) this.bandwidths = Arrays.copyOf(toCopy.bandwidths, toCopy.bandwidths.length); } @Override public DataPoint transform(DataPoint dp) { final Vec x = dp.getNumericalValues(); final List<Double> qi = dm.getQueryInfo(x); Vec sv = new SparseVector(numCentroids); double sum = 0; /* * Keep track of the highest activation in case none of the neurons have * a numericaly stable activation value. if this occurs we will do our * best by simply setting the one largest activation */ double maxActivation = Double.NEGATIVE_INFINITY; int highestNeuron = -1; for(int i = 0; i < centroids.size(); i++) { double dist = dm.dist(i, x, qi, centroids, centroidDistCache); double sig = bandwidths[i]; double activation = Math.exp(-(dist*dist)/(sig*sig*2)); if(activation > maxActivation) { maxActivation = activation; highestNeuron = i; } if(activation > 1e-16) { sv.set(i, activation); sum += activation; } } if(sv.nnz() == 0)//no activations { sv.set(highestNeuron, maxActivation); sum = maxActivation; } if(normalize && sum != 0.0)//-0.0 not an issue with rbf kernel sv.mutableDivide(sum); if(sv.nnz() > sv.length()/2)//at this point we would be using more memory than needed. Just switch to dense sv = new DenseVector(sv); return new DataPoint(sv, dp.getCategoricalValues(), dp.getCategoricalData()); } /** * The first phase of learning a RBF Neural Network is to determine the * neuron locations. This enum controls which method is used. 
*/ public static enum Phase1Learner { /** * Selects the Neurons at random from the training data */ RANDOM { @Override protected List<Vec> getCentroids(DataSet data, int centroids, DistanceMetric dm, boolean parallel) { Random rand = RandomUtil.getRandom(); List<Vec> toRet = new ArrayList<>(); Set<Integer> points = new IntSet(); while (points.size() < centroids) points.add(rand.nextInt(data.size())); for (int i : points) toRet.add(data.getDataPoint(i).getNumericalValues()); return toRet; } }, /** * Selects the Neurons by performing k-Means clustering on the data */ K_MEANS { @Override protected List<Vec> getCentroids(DataSet data, int centroids, DistanceMetric dm, boolean parallel) { HamerlyKMeans kmeans = new HamerlyKMeans(dm, SeedSelectionMethods.SeedSelection.KPP); kmeans.cluster(data, centroids, parallel); return kmeans.getMeans(); } }; /** * Obtains the centroids for the given data set * @param data the data set to get the centroids for * @param centroids the number of centroids to obtain * @param dm the distance metric that is being used * @param parallel the source of threads for parallel computation * @return the java.util.List<jsat.linear.Vec> */ abstract protected List<Vec> getCentroids(DataSet data, int centroids, DistanceMetric dm, boolean parallel); } /** * The second phase of learning a RBF Neural Network is to determine how the * neurons are activated to produce the output of the hidden layer. This * enum control which method is used. */ public static enum Phase2Learner { /** * This method sets the bandwidth for each neuron based on the distances * to the neuron from each data point that is closest to said neuron. If * &mu; is the average distance to the neuron, and &sigma; the standard * deviation, then the bandwidth <i>b</i> of the <i>j</i>'th neuron is * seto to <i>b<sub>j</sub> = &mu;<sub>j</sub> + * {@link #setAlpha(double) &alpha;} &sigma;<sub>j</sub></i> */ CENTROID_DISTANCE { @Override protected double[] estimateBandwidths(double alpha, int p, DataSet data, final List<Vec> centroids, final List<Double> centroidDistCache, final DistanceMetric dm, ExecutorService threadpool) { final double[] bandwidths = new double[centroids.size()]; final OnLineStatistics[] averages = new OnLineStatistics[bandwidths.length]; for(int i = 0; i < averages.length; i++) averages[i] = new OnLineStatistics(); List<Vec> X = data.getDataVectors(); ParallelUtils.run(true, data.size(), (start, end) -> { final OnLineStatistics[] localAverages = new OnLineStatistics[bandwidths.length]; for (int i = 0; i < localAverages.length; i++) localAverages[i] = new OnLineStatistics(); for (int z = start; z < end; z++) { Vec x = X.get(z); double minDist = Double.POSITIVE_INFINITY; int minI = 0; for (int i = 0; i < centroids.size(); i++) { double dist = dm.dist(i, x, centroids, centroidDistCache); if (dist < minDist) { minDist = dist; minI = i; } } localAverages[minI].add(minDist); } synchronized(averages) { for (int i = 0; i < localAverages.length; i++) { if (localAverages[i].getSumOfWeights() == 0) continue; averages[i] = OnLineStatistics.add(averages[i], localAverages[i]); } } }, threadpool); for (int i = 0; i < bandwidths.length; i++) bandwidths[i] = averages[i].getMean() + averages[i].getStandardDeviation() * alpha; return bandwidths; } }, /** * This bandwidth estimator only works for classification problems. Each * neuron is assigned a class based on the majority class labels of the * data points closes to said neuron. 
The bandwidth is then estimated as * {@link #setAlpha(double) &alpha;} times the distance from the neuron * to the closest neuron with a different class label.<br> * <br> * For this method &alpha; values between (0, 1) usually work best, 0.25 is a * good starting value. The value of &alpha; can go past 1. */ CLOSEST_OPPOSITE_CENTROID { @Override protected double[] estimateBandwidths(final double alpha, int p, DataSet data, final List<Vec> centroids, final List<Double> centroidDistCache, final DistanceMetric dm, ExecutorService threadpool) { final ClassificationDataSet cds; if(data instanceof ClassificationDataSet ) cds = (ClassificationDataSet) data; else throw new FailedToFitException("CLOSEST_OPPOSITE_CENTROID only works for classification data sets"); final double[] bandwidths = new double[centroids.size()]; /** * An array of arrays. Each centroid gets its own atomic array, * where each value indicates how many objects of class is stored. */ final AtomicIntegerArray[] classLabels = new AtomicIntegerArray[centroids.size()]; for(int i =0; i < classLabels.length; i++) classLabels[i] = new AtomicIntegerArray(cds.getClassSize()); ParallelUtils.run(true, data.size(), (start, end)-> { for(int id = start; id < end; id++) { final Vec x = cds.getDataPoint(id).getNumericalValues(); double minDist = Double.POSITIVE_INFINITY; int minI = 0; for (int i = 0; i < centroids.size(); i++) { double dist = dm.dist(i, x, centroids, centroidDistCache); if (dist < minDist) { minDist = dist; minI = i; } } classLabels[minI].incrementAndGet(cds.getDataPointCategory(id)); } }, threadpool); //Figure out the class label for each neuron final int[] neuronClass = new int[centroids.size()]; for(int i = 0; i < neuronClass.length; i++) { int maxVal = -1; int maxClass = 0; for(int j = 0; j < classLabels[i].length(); j++) { if(classLabels[i].get(j) > maxVal) { maxClass = j; maxVal = classLabels[i].get(j); } } neuronClass[i] = maxClass; } //Now set the bandwidth based on the distance to the nearest centroid with a different class label ParallelUtils.run(true, centroids.size(), (center) -> { double minDist = Double.POSITIVE_INFINITY; for (int i = 0; i < centroids.size(); i++) if (neuronClass[center] != neuronClass[i])//dont check for ourselves b/c we have the same class as ourselves, so no need minDist = Math.min(minDist, dm.dist(i, center, centroids, centroidDistCache)); if (Double.isInfinite(minDist))//possible if there is high class imbalance, run again but lie for (int i = 0; i < centroids.size(); i++) if (center != i) minDist = Math.min(minDist, dm.dist(i, center, centroids, centroidDistCache)); bandwidths[center] = alpha * minDist; }, threadpool); return bandwidths; } }, /** * This method sets the bandwidth for each neuron based on the average * distance of the {@link #setP(int) p} nearest neurons. 
The number of * standard deviations to add to the activation is controlled by * {@link #setAlpha(double) &alpha;} */ NEAREST_OTHER_CENTROID_AVERAGE { @Override protected double[] estimateBandwidths(final double alpha, final int p, DataSet data, final List<Vec> centroids, final List<Double> centroidDistCache, final DistanceMetric dm, ExecutorService threadpool) { final double[] bandwidths = new double[centroids.size()]; final CountDownLatch latch = new CountDownLatch(centroids.size()); ParallelUtils.run(true, centroids.size(), (center)-> { BoundedSortedList<Double> closestDistances = new BoundedSortedList<>(p); for (int i = 0; i < centroids.size(); i++) if (i != center) closestDistances.add(dm.dist(i, center, centroids, centroidDistCache)); OnLineStatistics stats = new OnLineStatistics(); for (double dist : closestDistances) stats.add(dist); bandwidths[center] = stats.getMean() + alpha * stats.getStandardDeviation(); }, threadpool); return bandwidths; } }; abstract protected double[] estimateBandwidths(double alpha, int p, final DataSet data, final List<Vec> centroids, final List<Double> centroidDistCache, final DistanceMetric dm, ExecutorService threadpool); } /** * Sets the alpha parameter. This value is used for certain * {@link Phase2Learner} learners as a parameter. A good default value for * most methods is often 1 or 3. However the parameter must always be * a non-negative value. * * @param alpha a non negative value that controls the width of the learned * bandwidths. */ public void setAlpha(double alpha) { if(alpha < 0 || Double.isInfinite(alpha) || Double.isNaN(alpha)) throw new IllegalArgumentException("Alpha must be a positive value, not " + alpha); this.alpha = alpha; } /** * Returns the alpha bandwidth learning parameter * @return the alpha bandwidth learning parameter * @see #setAlpha(double) */ public double getAlpha() { return alpha; } /** * Guesses the distribution for the {@link #setAlpha(double) } parameter * @param data the data to create a guess for * @return a guess for the distribution of the Alpha parameter */ public static Distribution guessAlpha(DataSet data) { return new Uniform(0.8, 3.5); } /** * Sets the nearest neighbor parameter. This value is used for certain * {@link Phase2Learner} learners as a parameter. It is used to control the * number of neighbors taken into account in learning the parameter value. * It must always be a positive value. 3 is usually a good value for * this parameter. * * @param p the positive integer used that controls the width of the learned * bandwidths */ public void setP(int p) { if(p < 1) throw new IllegalArgumentException("neighbors parameter must be positive, not "+p); this.p = p; } /** * Returns the nearest neighbors parameter. * @return the nearest neighbors parameter. * @see #setP(int) */ public int getP() { return p; } /** * Guesses the distribution for the {@link #setP(int) } parameter * @param data the data to create a guess for * @return a guess for the distribution of the P parameter */ public static Distribution guessP(DataSet data) { return new UniformDiscrete(2, 5); } /** * Sets the number of centroids to learn for this model. Increasing the * number of centroids increases the complexity of the model as well as * training and evaluation time. The centroids serve as the hidden units in * the network. 
* <br><br> * The centroids learned are controlled via the * {@link #setPhase1Learner(jsat.classifiers.neuralnetwork.RBFNet.Phase1Learner)} * method * * @param numCentroids the number of centroids to use in the model */ public void setNumCentroids(int numCentroids) { if(numCentroids < 1) throw new IllegalArgumentException("Number of centroids must be positive, not " + numCentroids); this.numCentroids = numCentroids; } /** * Returns the number of centroids to use when training * @return * Returns the number of centroids to use when training */ public int getNumCentroids() { return numCentroids; } /** * Guesses the distribution for the {@link #setNumCentroids(int) } parameter * @param data the data to create a guess for * @return a guess for the distribution of the number of centroids to use */ public static Distribution guessNumCentroids(DataSet data) { return new UniformDiscrete(25, 1000);//maybe change in the future } /** * Sets the distance metric used to determine neuron activations. * @param dm the distance metric to use */ public void setDistanceMetric(DistanceMetric dm) { this.dm = dm; } /** * Returns the distance metric in use * @return the distance metric in use */ public DistanceMetric getDistanceMetric() { return dm; } /** * Sets the method used for learning the centroids (or hidden units) of the * network. * * @param p1l the learning method to use */ public void setPhase1Learner(Phase1Learner p1l) { this.p1l = p1l; } /** * Returns the method to use for learning the centroids of the network. * * @return the method to use for learning the centroids of the network. */ public Phase1Learner getPhase1Learner() { return p1l; } /** * Sets the method used for learning the bandwidths for each centroid in the * network. Depending on the method used, {@link #setAlpha(double) } or * {@link #setP(int)} may impact the learned bandwidths. * * @param p2l the learning method to use */ public void setPhase2Learner(Phase2Learner p2l) { this.p2l = p2l; } /** * Returns the learning method to use for determining the bandwidths of each * center in the network. * @return the learning method to use for the bandwidths */ public Phase2Learner getPhase2Learner() { return p2l; } /** * Sets whether or not to normalize the outputs of the neurons in the * network so that the activations sum to one. Normalizing the outputs can * increase the generalization ability of the network. By default this is * set to {@code true} * * @param normalize {@code true} to normalize the neuron outputs, * {@code false} to use the raw activation values. */ public void setNormalize(boolean normalize) { this.normalize = normalize; } /** * Returns whether or not the network is currently normalizing its neuron * outputs. 
* * @return whether or not the neuron outputs are normalized */ public boolean isNormalize() { return normalize; } @Override public CategoricalResults classify(DataPoint data) { return baseClassifier.classify(transform(data)); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { if(baseClassifier == null) throw new FailedToFitException("RBFNet was not given a base classifier"); ExecutorService threadPool = ParallelUtils.getNewExecutor(parallel); //Learn Centroids centroids = p1l.getCentroids(dataSet, numCentroids, dm, parallel); centroidDistCache = dm.getAccelerationCache(centroids, parallel); //Learn Parameter Values bandwidths = p2l.estimateBandwidths(alpha, p, dataSet, centroids, centroidDistCache, dm, threadPool); //apply transform ClassificationDataSet transformedData = dataSet.shallowClone(); transformedData.applyTransform(this, parallel); //learn final model on transformed inputs baseClassifier.train(transformedData, parallel); } @Override public boolean supportsWeightedData() { if(baseClassifier != null) return baseClassifier.supportsWeightedData(); else return baseRegressor.supportsWeightedData(); } @Override public double regress(DataPoint data) { return baseRegressor.regress(transform(data)); } @Override public void fit(DataSet data) { if (data instanceof ClassificationDataSet) train((ClassificationDataSet) data); else if(data instanceof RegressionDataSet) train((RegressionDataSet) data); else throw new FailedToFitException("Data must be a classifiation or regression dataset, not " + data.getClass().getSimpleName()); } @Override public void train(RegressionDataSet dataSet, boolean parallel) { if(baseRegressor == null) throw new FailedToFitException("RBFNet was not given a base classifier"); ExecutorService threadPool = ParallelUtils.getNewExecutor(parallel); //Learn Centroids centroids = p1l.getCentroids(dataSet, numCentroids, dm, parallel); centroidDistCache = dm.getAccelerationCache(centroids, parallel); //Learn Parameter Values bandwidths = p2l.estimateBandwidths(alpha, p, dataSet, centroids, centroidDistCache, dm, threadPool); //apply transform RegressionDataSet transformedData = dataSet.shallowClone(); transformedData.applyTransform(this, parallel); //learn final model on transformed inputs baseRegressor.train(transformedData, parallel); } @Override public RBFNet clone() { return new RBFNet(this); } }
30,513
37.821883
214
java
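A minimal standalone sketch of the Phase2Learner.CENTROID_DISTANCE bandwidth rule documented in RBFNet.java above: each point is assigned to its nearest centroid, and centroid j's bandwidth becomes the mean of its assigned distances plus alpha standard deviations, b_j = mu_j + alpha * sigma_j. It uses plain Java arrays and Euclidean distance; every class, method, and variable name below is illustrative and not part of the JSAT API.

import java.util.Arrays;

/** Sketch of the CENTROID_DISTANCE rule: b_j = mean_j + alpha * stdDev_j of the assigned distances. */
public class CentroidDistanceBandwidthSketch
{
    /** Plain Euclidean distance between two dense points. */
    static double dist(double[] a, double[] b)
    {
        double sum = 0;
        for (int i = 0; i < a.length; i++)
            sum += (a[i] - b[i]) * (a[i] - b[i]);
        return Math.sqrt(sum);
    }

    static double[] estimateBandwidths(double[][] data, double[][] centroids, double alpha)
    {
        int k = centroids.length;
        double[] count = new double[k], mean = new double[k], m2 = new double[k];
        for (double[] x : data)
        {
            // assign the point to its closest centroid
            int best = 0;
            double bestDist = Double.POSITIVE_INFINITY;
            for (int j = 0; j < k; j++)
            {
                double d = dist(x, centroids[j]);
                if (d < bestDist) { bestDist = d; best = j; }
            }
            // online (Welford) update of the distance mean/variance for that centroid
            count[best]++;
            double delta = bestDist - mean[best];
            mean[best] += delta / count[best];
            m2[best] += delta * (bestDist - mean[best]);
        }
        double[] bandwidths = new double[k];
        for (int j = 0; j < k; j++)
        {
            double stdDev = count[j] > 1 ? Math.sqrt(m2[j] / (count[j] - 1)) : 0;
            bandwidths[j] = mean[j] + alpha * stdDev; // b_j = mu_j + alpha * sigma_j
        }
        return bandwidths;
    }

    public static void main(String[] args)
    {
        double[][] data = { {0, 0}, {0.2, 0.1}, {5, 5}, {5.3, 4.8} };
        double[][] centroids = { {0.1, 0.05}, {5.1, 4.9} };
        System.out.println(Arrays.toString(estimateBandwidths(data, centroids, 1.0)));
    }
}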
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/SGDNetworkTrainer.java
package jsat.classifiers.neuralnetwork; import java.io.Serializable; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.logging.Level; import java.util.logging.Logger; import jsat.classifiers.neuralnetwork.activations.ActivationLayer; import jsat.classifiers.neuralnetwork.initializers.BiastInitializer; import jsat.classifiers.neuralnetwork.initializers.WeightInitializer; import jsat.classifiers.neuralnetwork.regularizers.Max2NormRegularizer; import jsat.classifiers.neuralnetwork.regularizers.WeightRegularizer; import jsat.linear.DenseMatrix; import jsat.linear.DenseVector; import jsat.linear.Matrix; import jsat.linear.Vec; import jsat.math.decayrates.DecayRate; import jsat.math.decayrates.NoDecay; import jsat.math.optimization.stochastic.GradientUpdater; import jsat.math.optimization.stochastic.SimpleSGD; import jsat.utils.SystemInfo; import jsat.utils.random.RandomUtil; import jsat.utils.random.XORWOW; /** * This class provides a highly configurable and generalized method of training * a neural network using Stochastic Gradient Decent.<br> * <br> * Note, the API of this class may change in the future. * * @author Edward Raff */ public class SGDNetworkTrainer implements Serializable { private static final long serialVersionUID = 5753653181230693131L; /** * An array where the length indicates the number of layers and the value of * each index indicates the number of neurons in that layer. This includes * both the input and output layers */ private int[] layerSizes; /** * The base learning rate to use */ private double eta; /** * The dropout probability for the input layers */ private double p_i; /** * The integer threshold to used when sampling a value from * {@link Random#nextInt() } to get the correct dropout probability */ private int p_i_intThresh; /** * The dropout probability for the hidden layers. */ private double p_o; /** * The integer threshold to used when sampling a value from * {@link Random#nextInt() } to get the correct dropout probability */ private int p_o_intThresh; /** * The gradient updater to use for updating weights and biases */ private GradientUpdater updater = new SimpleSGD(); /** * The weight regularization method */ private WeightRegularizer regularizer = new Max2NormRegularizer(15); /** * The method to initialize all neuron connection weights from */ private WeightInitializer weightInit; /** * The method to initialize all neuron bias values from */ private BiastInitializer biasInit; /** * This list contains the neuron weight connection matrix for each layer * after the input layer */ private List<Matrix> W; /** * This list contains the gradients to update the weight matrices by */ private List<Matrix> W_deltas; /** * This list contains the gradient updaters used for each layer, where there * is a list of each matrix and each matrix has a list for each row. 
*/ private List<List<GradientUpdater>> W_updaters; /** * This list contains the neuron bias connections for each layer after the * input layer */ private List<Vec> B; /** * This list contains the gradients to update the weight biases by */ private List<Vec> B_deltas; /** * This list contains the gradient updaters used for each set of bias * connections */ private List<GradientUpdater> B_updaters; /** * This list contains the activation method for each layer after the input * layer */ private List<ActivationLayer> layersActivation; /** * The decay rate to apply to the base learning rate */ private DecayRate etaDecay = new NoDecay(); /** * The time step, incremented after every mini batch */ private int time; /** * Matrices for storing the activations of each layer */ private Matrix[] activations; private Matrix[] unactivated; private Matrix[] deltas; /** * Creates a new SGD network training that uses dropout */ public SGDNetworkTrainer() { setDropoutInput(0.2); setDropoutHidden(0.5); } /** * Copy constructor * @param toCopy the object to copy */ public SGDNetworkTrainer(SGDNetworkTrainer toCopy) { this.layerSizes = Arrays.copyOf(toCopy.layerSizes, toCopy.layerSizes.length); this.eta = toCopy.eta; this.weightInit = toCopy.weightInit.clone(); this.biasInit = toCopy.biasInit.clone(); this.regularizer = toCopy.regularizer.clone(); this.updater = toCopy.updater.clone(); this.setDropoutInput(toCopy.getDropoutInput()); this.setDropoutHidden(toCopy.getDropoutHidden()); if(toCopy.W != null) { this.W = new ArrayList<Matrix>(); for(Matrix w : toCopy.W) this.W.add(w.clone()); this.B = new ArrayList<Vec>(); for(Vec b : toCopy.B) this.B.add(b.clone()); } if(toCopy.W_deltas != null) { this.W_deltas = new ArrayList<Matrix>(); for(Matrix w : toCopy.W_deltas) this.W_deltas.add(w.clone()); this.B_deltas = new ArrayList<Vec>(); for(Vec b : toCopy.B_deltas) this.B_deltas.add(b.clone()); } if(toCopy.W_updaters != null) { this.W_updaters = new ArrayList<List<GradientUpdater>>(); for(List<GradientUpdater> updaters : toCopy.W_updaters) { List<GradientUpdater> copyUpdaters = new ArrayList<GradientUpdater>(updaters.size()); this.W_updaters.add(copyUpdaters); for(GradientUpdater item : updaters) copyUpdaters.add(item.clone()); } this.B_updaters = new ArrayList<GradientUpdater>(toCopy.B_updaters); for(GradientUpdater item : toCopy.B_updaters) this.B_updaters.add(item.clone()); } this.layersActivation = new ArrayList<ActivationLayer>(toCopy.layersActivation.size()); for(ActivationLayer activation : toCopy.layersActivation) this.layersActivation.add(activation.clone()); } /** * Sets the probability of dropping a value from the input layer * @param p the probability in [0, 1) of dropping a value in the input layer */ public void setDropoutInput(double p) { if(p < 0 || p >= 1 || Double.isNaN(p)) throw new IllegalArgumentException("Dropout probability must be in [0,1) not " + p); p_i = p; p_i_intThresh = (int) (0xffffffffL*p_i+Integer.MIN_VALUE); } /** * * @return the dropout probability for the input layer */ public double getDropoutInput() { return p_i; } /** * Sets the probability of dropping a value from the hidden layer * @param p the probability in [0, 1) of dropping a value in the hidden * layer */ public void setDropoutHidden(double p) { if(p < 0 || p >= 1 || Double.isNaN(p)) throw new IllegalArgumentException("Dropout probability must be in [0,1) not " + p); p_o = p; p_o_intThresh = (int) (0xffffffffL*p_o+Integer.MIN_VALUE); } /** * * @return the dropout probability for the hidden layers */ public double 
getDropoutHidden() { return p_o; } /** * Sets the decay rate on the global learning rate over time * @param etaDecay the decay rate to use */ public void setEtaDecay(DecayRate etaDecay) { this.etaDecay = etaDecay; } /** * * @return the decay rate in use */ public DecayRate getEtaDecay() { return etaDecay; } /** * Sets the base global learning rate. * @param eta the learning rate to use */ public void setEta(double eta) { if(eta <= 0 || Double.isNaN(eta) || Double.isInfinite(eta)) throw new IllegalArgumentException("eta must be a positive constant, not " + eta); this.eta = eta; } /** * * @return the global learning rate used */ public double getEta() { return eta; } /** * Sets the method of regularizing the connections weights * @param regularizer the method of regularizing the network */ public void setRegularizer(WeightRegularizer regularizer) { this.regularizer = regularizer; } /** * * @return the regularizer for the network */ public WeightRegularizer getRegularizer() { return regularizer; } /** * Sets the array indicating the total number of layers in the network and * the sizes of each layer. The length of the array is the number of layers * and the value at each index is the size of that layer. * @param layerSizes the array of layer sizes */ public void setLayerSizes(int... layerSizes) { this.layerSizes = layerSizes; } /** * * @return the array of layer sizes in the network */ public int[] getLayerSizes() { return layerSizes; } /** * Sets the list of layer activations for all layers other than the input * layer. * @param layersActivation the list of hidden and output layer activations */ public void setLayersActivation(List<ActivationLayer> layersActivation) { this.layersActivation = layersActivation; } /** * Sets the gradient update that will be used when updating the weight * matrices and bias terms. * @param updater the updater to use */ public void setGradientUpdater(GradientUpdater updater) { this.updater = updater; } /** * * @return the gradient updater used */ public GradientUpdater getGradientUpdater() { return updater; } /** * Sets the method used to initialize matrix connection weights * @param weightInit the weight initialization method */ public void setWeightInit(WeightInitializer weightInit) { this.weightInit = weightInit; } /** * * @return the weight initialization method */ public WeightInitializer getWeightInit() { return weightInit; } /** * Sets the method to use when initializing neuron bias values * @param biasInit the bias initialization method */ public void setBiasInit(BiastInitializer biasInit) { this.biasInit = biasInit; } /** * * @return the bias initialization method */ public BiastInitializer getBiasInit() { return biasInit; } /** * Prepares the network by creating all needed structure, initializing * weights, and preparing it for updates */ public void setup() { assert (layersActivation.size() == layerSizes.length-1); W = new ArrayList<Matrix>(layersActivation.size()); B = new ArrayList<Vec>(layersActivation.size()); Random rand = RandomUtil.getRandom(); for(int l = 1; l < layerSizes.length; l++) { W.add(new DenseMatrix(layerSizes[l], layerSizes[l-1])); weightInit.init(W.get(W.size()-1), rand); B.add(new DenseVector(layerSizes[l])); biasInit.init(B.get(B.size()-1), layerSizes[l-1], rand); } time = 0; prepareForUpdating(); } /** * This method assumes that the neural network structure is already in * place, and prepares only the structure needed to perform updates. 
<br> * Any gradient related information that was being used before (such as * momentum when performing updates) will be lost */ private void prepareForUpdating() { W_deltas = new ArrayList<Matrix>(layersActivation.size()); W_updaters = new ArrayList<List<GradientUpdater>>(layersActivation.size()); B_deltas = new ArrayList<Vec>(layersActivation.size()); B_updaters = new ArrayList<GradientUpdater>(layersActivation.size()); for(int l = 1; l < layerSizes.length; l++) { W_deltas.add(new DenseMatrix(layerSizes[l], layerSizes[l-1])); B_deltas.add(new DenseVector(layerSizes[l])); //updaters List<GradientUpdater> W_updaters_l = new ArrayList<GradientUpdater>(layerSizes[l]); for(int i = 0; i < layerSizes[l]; i++) { GradientUpdater W_updater = updater.clone(); W_updater.setup(layerSizes[l-1]); W_updaters_l.add(W_updater); } W_updaters.add(W_updaters_l); B_updaters.add(updater.clone()); B_updaters.get(B_updaters.size()-1).setup(layerSizes[l]); } activations = new Matrix[layersActivation.size()]; unactivated = new Matrix[layersActivation.size()]; deltas = new Matrix[layersActivation.size()]; } /** * Calling this method indicates that the user has no intentions of updating * the network again and is ready to use it for prediction. This will remove * objects not needed for prediction and do cleanup. */ public void finishUpdating() { W_deltas = null; W_updaters = null; B_deltas = null; B_updaters = null; activations = unactivated = deltas = null; W.get(0).mutableMultiply(1.0-p_i); B.get(0).mutableMultiply(1.0-p_i); for(int i = 1; i < W.size(); i++) { W.get(i).mutableMultiply(1.0-p_o); B.get(i).mutableMultiply(1.0-p_o); } } /** * Performs a mini-batch update of the network using the given input and * output pairs * @param x the list of input values * @param y the list of output values * @return the error incurred on the given mini batch */ public double updateMiniBatch(List<Vec> x, List<Vec> y) { return updateMiniBatch(x, y, null); } /** * Performs a mini-batch update of the network using the given input and * output pairs * @param x the list of input values * @param y the list of output values * @param ex the source of threads for parallel computation, may be * {@code null} * @return the error incurred on the given mini batch */ public double updateMiniBatch(List<Vec> x, List<Vec> y, ExecutorService ex) { Random rand = RandomUtil.getRandom(); for(Matrix w : W_deltas) w.zeroOut(); for(Vec b : B_deltas) b.zeroOut(); for(int i = 0; i < layersActivation.size(); i++) { //TODO isntead of making a whole new matrix every time, use a submatrix when bigger and enlarge when too small if(activations[i] == null || activations[i].cols() != x.size()) activations[i] = new DenseMatrix(layerSizes[i+1], x.size()); if(unactivated[i] == null || unactivated[i].cols() != x.size()) unactivated[i] = new DenseMatrix(layerSizes[i+1], x.size()); if(deltas[i] == null || deltas[i].cols() != x.size()) deltas[i] = new DenseMatrix(layerSizes[i+1], x.size()); } Matrix X = new DenseMatrix(layerSizes[0], x.size()); for (int j = 0; j < x.size(); j++) x.get(j).copyTo(X.getColumnView(j)); if(p_i > 0) applyDropout(X, p_i_intThresh, rand, ex); double errorMade = 0; feedforward(X, activations, unactivated, ex, rand); errorMade = backpropagateError(deltas, activations, x, y, errorMade, ex, unactivated); accumulateUpdates(X, activations, deltas, ex, x); double eta_cur = etaDecay.rate(time++, eta); if(ex == null) applyGradient(eta_cur); else applyGradient(eta_cur, ex); return errorMade; } private void feedforward(Matrix X, Matrix[] activationsM, 
Matrix[] unactivatedM, ExecutorService ex, Random rand) { //feed forward for (int l = 0; l < layersActivation.size(); l++) { final Matrix a_lprev = (l == 0 ? X : activationsM[l - 1]); final Matrix a_l = activationsM[l]; final Matrix z_l = unactivatedM[(l)]; z_l.zeroOut(); if(ex == null) W.get(l).multiply(a_lprev, z_l); else W.get(l).multiply(a_lprev, z_l, ex); //add the bias term back in final Vec B_l = B.get(l); if (ex == null) { for (int i = 0; i < z_l.rows(); i++) { final double B_li = B_l.get(i); for (int j = 0; j < z_l.cols(); j++) z_l.increment(i, j, B_li); } } else { final CountDownLatch latch = new CountDownLatch(SystemInfo.LogicalCores); for (int id = 0; id < SystemInfo.LogicalCores; id++) { final int ID = id; ex.submit(new Runnable() { @Override public void run() { for (int i = ID; i < z_l.rows(); i += SystemInfo.LogicalCores) { final double B_li = B_l.get(i); for (int j = 0; j < z_l.cols(); j++) z_l.increment(i, j, B_li); } latch.countDown(); } }); } try { latch.await(); } catch (InterruptedException ex1) { Logger.getLogger(SGDNetworkTrainer.class.getName()).log(Level.SEVERE, null, ex1); } } if (p_o > 0 && l != layersActivation.size() - 1) applyDropout(z_l, p_o_intThresh, rand, ex); layersActivation.get(l).activate(z_l, a_l, false); } } /** * Feeds the given singular pattern through the network and computes its * activations * @param x the input vector to feed forward through the network * @return the final activation for this network */ public Vec feedfoward(Vec x) { Vec a_lprev = x; for (int l = 0; l < layersActivation.size(); l++) { Vec z_l = new DenseVector(layerSizes[l+1]); z_l.zeroOut(); W.get(l).multiply(a_lprev, 1.0, z_l); //add the bias term back in final Vec B_l = B.get(l); z_l.mutableAdd(B_l); layersActivation.get(l).activate(z_l, z_l); a_lprev = z_l; } return a_lprev; } private double backpropagateError(Matrix[] deltasM, Matrix[] activationsM, List<Vec> x, List<Vec> y, double errorMade, ExecutorService ex, Matrix[] unactivatedM) { //backpropagate the error for (int l = layersActivation.size() - 1; l >= 0; l--) { Matrix delta_l = deltasM[l]; if (l == layersActivation.size() - 1)//output layer { activationsM[(l)].copyTo(delta_l); for(int r = 0; r < x.size(); r++) { delta_l.getColumnView(r).mutableSubtract(y.get(r)); errorMade += delta_l.getColumnView(r).pNorm(2); } } else//any other layer { delta_l.zeroOut(); if(ex == null) W.get(l+1).transposeMultiply(deltasM[l+1], delta_l); else W.get(l+1).transposeMultiply(deltasM[l+1], delta_l, ex); layersActivation.get(l).backprop(unactivatedM[l], activationsM[l], delta_l, delta_l, false); } } return errorMade; } private void accumulateUpdates(Matrix X, Matrix[] activationsM, Matrix[] deltasM, ExecutorService ex, final List<Vec> x) { final double invXsize = 1.0/x.size(); //accumulate updates for (int l = 0; l < layersActivation.size(); l++) { final Matrix a_lprev = (l == 0 ? 
X : activationsM[(l - 1)]); final Matrix delta_l = deltasM[l]; if(ex == null) delta_l.multiplyTranspose(a_lprev, W_deltas.get(l)); else delta_l.multiplyTranspose(a_lprev, W_deltas.get(l), ex); W_deltas.get(l).mutableMultiply(invXsize); final Vec B_delta_l = B_deltas.get(l); if(ex == null) for(int i = 0; i < delta_l.rows(); i++) { double change = 0; for(int j = 0; j < delta_l.cols(); j++) change += delta_l.get(i, j); B_delta_l.increment(i, change*invXsize); } else { final CountDownLatch latch = new CountDownLatch(Math.min(SystemInfo.LogicalCores, delta_l.rows())); for(int id = 0; id < SystemInfo.LogicalCores; id++) { final int ID = id; ex.submit(new Runnable() { @Override public void run() { for(int i = ID; i < delta_l.rows(); i+=SystemInfo.LogicalCores) { double change = 0; for(int j = 0; j < delta_l.cols(); j++) change += delta_l.get(i, j); B_delta_l.increment(i, change*invXsize); } latch.countDown(); } }); } try { latch.await(); } catch (InterruptedException ex1) { Logger.getLogger(SGDNetworkTrainer.class.getName()).log(Level.SEVERE, null, ex1); } } } } private void applyGradient(double eta_cur) { //apply gradient for(int l = 0; l < layersActivation.size(); l++) { B_updaters.get(l).update(B.get(l), B_deltas.get(l), eta_cur); final Matrix W_l = W.get(l); final Matrix W_dl = W_deltas.get(l); for(int i = 0; i < W_l.rows(); i++) { Vec W_li = W_l.getRowView(i); W_updaters.get(l).get(i).update(W_li, W_dl.getRowView(i), eta_cur); } regularizer.applyRegularization(W_l, B.get(l)); } } private void applyGradient(final double eta_cur, ExecutorService ex) { List<Future<?>> futures = new ArrayList<Future<?>>(); //apply gradient for(int l = 0; l < layersActivation.size(); l++) { B_updaters.get(l).update(B.get(l), B_deltas.get(l), eta_cur); final Matrix W_l = W.get(l); final Matrix W_dl = W_deltas.get(l); final int L = l; for(int indx = 0; indx < W_l.rows(); indx++) { final int i = indx; futures.add(ex.submit(new Runnable() { @Override public void run() { Vec W_li = W_l.getRowView(i); W_updaters.get(L).get(i).update(W_li, W_dl.getRowView(i), eta_cur); B.get(L).set(i, regularizer.applyRegularizationToRow(W_li, B.get(L).get(i))); } })); } } try { for(Future<?> future : futures) future.get(); } catch (InterruptedException e) { } catch (ExecutionException e) { } } /** * Applies dropout to the given matrix * @param X the matrix to dropout values from * @param randThresh the threshold that a random integer must be less than to get dropped out * @param rand the source of randomness * @param ex the source of threads for parlallel computation, or {@code null} */ private static void applyDropout(final Matrix X, final int randThresh, final Random rand, ExecutorService ex) { if (ex == null) { for (int i = 0; i < X.rows(); i++) for (int j = 0; j < X.cols(); j++) if (rand.nextInt() < randThresh) X.set(i, j, 0.0); } else { final CountDownLatch latch = new CountDownLatch(SystemInfo.LogicalCores); for(int id = 0; id < SystemInfo.LogicalCores; id++) { final int ID = id; ex.submit(new Runnable() { @Override public void run() { for (int i = ID; i < X.rows(); i+=SystemInfo.LogicalCores) for (int j = 0; j < X.cols(); j++) if (rand.nextInt() < randThresh) X.set(i, j, 0.0); latch.countDown(); } }); } try { latch.await(); } catch (InterruptedException ex1) { Logger.getLogger(SGDNetworkTrainer.class.getName()).log(Level.SEVERE, null, ex1); } } } @Override protected SGDNetworkTrainer clone() { return new SGDNetworkTrainer(this); } }
26,906
31.614545
165
java
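The dropout fields p_i_intThresh and p_o_intThresh in SGDNetworkTrainer.java above map a dropout probability onto the signed 32-bit integer range, so a single rand.nextInt() comparison decides whether an entry is dropped instead of a nextDouble() call per matrix element. The self-contained check below illustrates that mapping; the class and variable names are illustrative, not JSAT API.

import java.util.Random;

/** Empirical check of the integer-threshold dropout trick: P(rand.nextInt() < thresh) should equal p. */
public class DropoutThresholdSketch
{
    public static void main(String[] args)
    {
        double p = 0.5; // desired dropout probability
        // map p onto the full int range, the same expression the trainer uses
        int thresh = (int) (0xffffffffL * p + Integer.MIN_VALUE);

        Random rand = new Random(42);
        int trials = 1_000_000, dropped = 0;
        for (int i = 0; i < trials; i++)
            if (rand.nextInt() < thresh) // the same test applied to each matrix entry during training
                dropped++;

        System.out.printf("requested p = %.3f, observed drop rate = %.3f%n", p, dropped / (double) trials);
    }
}

Note that the trainer compensates for dropout at prediction time: finishUpdating() above scales the learned weights and biases by (1 - p), so the expected scale of activations at test time matches what was seen during training.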
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/SOM.java
package jsat.classifiers.neuralnetwork; import static java.lang.Math.*; import java.util.*; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.logging.Level; import java.util.logging.Logger; import jsat.DataSet; import jsat.classifiers.*; import jsat.distributions.empirical.kernelfunc.EpanechnikovKF; import jsat.distributions.empirical.kernelfunc.KernelFunction; import jsat.exceptions.UntrainedModelException; import jsat.linear.*; import jsat.linear.distancemetrics.DistanceMetric; import jsat.linear.distancemetrics.EuclideanDistance; import jsat.linear.vectorcollection.*; import jsat.math.decayrates.DecayRate; import jsat.math.decayrates.ExponetialDecay; import jsat.parameters.*; import jsat.utils.*; import jsat.utils.concurrent.ParallelUtils; import jsat.utils.random.RandomUtil; /** * An implementation of a Self Organizing Map, also called a Kohonen Map. It is * linked to many other algorithms, and is an unsupervised learning algorithm * that can perform classification. * <br><br> * The SOM is useful for visualizing data sets, though this is not yet implemented. * * @author Edward Raff */ public class SOM implements Classifier, Parameterized { //TODO add code for visualizing the SOM private static final long serialVersionUID = -6444988770441043797L; public static final int DEFAULT_MAX_ITERS = 500; public static final KernelFunction DEFAULT_KF = EpanechnikovKF.getInstance(); public static final double DEFAULT_LEARNING_RATE = 0.1; public static final DecayRate DEFAULT_LEARNING_DECAY = new ExponetialDecay(); public static final DecayRate DEFAULT_NEIGHBOR_DECAY = new ExponetialDecay(); private int somWidth; private int somHeight; private int maxIters; private KernelFunction kf; private double initialLearningRate; private DecayRate learningDecay; private DecayRate neighborDecay; private DistanceMetric dm; private Vec[][] weights; private CategoricalResults[] crWeightPairs; private VectorCollection<VecPaired<Vec, Integer>> vcCollection; //Used for parallel varient /** * Contains the sum of all inputs that were the BMU for the given index. 
* The final list of data point should be a synchronized list so that * multiple threads can add to the list safely */ private List<List<List<DataPoint>>> weightUpdates; /** * Creates a new SOM using the given parameters using the * {@link EuclideanDistance} * * @param somHeight the height of the SOM lattice * @param somWeight the weight of the SOM lattice */ public SOM(int somHeight, int somWeight) { this(new EuclideanDistance(), somHeight, somWeight); } /** * Creates a new SOM using the given parameters * @param dm the distance metric to use when comparing points * @param somHeight the height of the SOM lattice * @param somWeight the weight of the SOM lattice */ public SOM(DistanceMetric dm, int somHeight, int somWeight) { this(dm, somHeight, somWeight, new DefaultVectorCollection<VecPaired<Vec, Integer>>()); } /** * Creates a new SOM using the given parameters * @param dm the distance metric to use when comparing points * @param somHeight the height of the SOM lattice * @param somWeight the weight of the SOM lattice * @param vcFactory the vector collection to use for containing points */ public SOM(DistanceMetric dm, int somHeight, int somWeight, VectorCollection<VecPaired<Vec, Integer>> vcFactory) { this(DEFAULT_MAX_ITERS, DEFAULT_KF, DEFAULT_LEARNING_RATE, DEFAULT_LEARNING_DECAY, DEFAULT_NEIGHBOR_DECAY, dm, somHeight, somWeight, vcFactory); } private SOM(int maxIters, KernelFunction kf, double initialLearningRate, DecayRate learningDecay, DecayRate neighborDecay, DistanceMetric dm, int somHeight, int somWeight, VectorCollection<VecPaired<Vec, Integer>> vcCollection) { this.somHeight = somHeight; this.somWidth = somWeight; this.maxIters = maxIters; this.kf = kf; this.initialLearningRate = initialLearningRate; this.learningDecay = learningDecay; this.neighborDecay = neighborDecay; this.dm = dm; this.vcCollection =vcCollection; } /** * Sets the maximum number of iterations that will be used to converge * @param maxIters the max iterations of the algorithm */ public void setMaxIterations(int maxIters) { if(maxIters < 1) throw new ArithmeticException("At least one iteration must be performed"); this.maxIters = maxIters; } /** * Returns the maximum number of iterations that will be used to converge * @return the max iterations of the algorithm */ public int getMaxIterations() { return maxIters; } /** * Sets the width of the SOM lattice to create * @param somWidth the width of the lattice */ public void setSomWidth(int somWidth) { if(somWidth < 1) throw new ArithmeticException("Lattice width must be positive, not " + somWidth); this.somWidth = somWidth; } /** * Sets the height of the SOM lattice to create * @param somHeight the height of the lattice */ public void setSomHeight(int somHeight) { if(somHeight < 1) throw new ArithmeticException("ALttice height must be positive, not " + somHeight); this.somHeight = somHeight; } /** * Returns the height of the SOM lattice to create * @return the height of the lattice */ public int getSomHeight() { return somHeight; } /** * Returns the width of the SOM lattice to create * @return the width of the lattice */ public int getSomWidth() { return somWidth; } /** * Sets the rate at which input is incorporated at each iteration of the SOM * algorithm * * @param initialLearningRate the rate the SOM learns at */ public void setInitialLearningRate(double initialLearningRate) { if(Double.isInfinite(initialLearningRate) || Double.isNaN(initialLearningRate) || initialLearningRate <= 0) throw new ArithmeticException("Learning rate must be a positive constant, not 
" + initialLearningRate); this.initialLearningRate = initialLearningRate; } /** * Returns the rate at which input is incorporated at each iteration of the SOM * @return the rate the SOM learns at */ public double getInitialLearningRate() { return initialLearningRate; } /** * The rate the SOM learns decays over each iteration, and this defines the * way in which the rate decays. * @param learningDecay the decay for the learning rate */ public void setLearningDecay(DecayRate learningDecay) { if(learningDecay == null) throw new NullPointerException("Can not set a decay rate to null"); this.learningDecay = learningDecay; } /** * The rate the SOM learns decays over each iteration, and this defines the * way in which the rate decays. * @return the decay for the learning rate */ public DecayRate getLearningDecay() { return learningDecay; } /** * The range of effect each data point has decays with each iteration, and * this defines the way in which the rate decays. * * @param neighborDecay the decay for the neighbor range. */ public void setNeighborDecay(DecayRate neighborDecay) { if(neighborDecay == null) throw new NullPointerException("Can not set a decay rate to null"); this.neighborDecay = neighborDecay; } /** * The range of effect each data point has decays with each iteration, and * this defines the way in which the rate decays. * * @return the decay for the neighbor range. */ public DecayRate getNeighborDecay() { return neighborDecay; } /** * * @param D the dimension of the data set * @return the initial neighbor radius */ private double intitalizeWeights(int D) { //TODO random intialization is theoretical interesting, but technically slower. Faster intializations exist for(int i = 0; i < somHeight; i++) for(int j = 0; j < somWidth; j++) weights[i][j] = DenseVector.random(D); return max(somWidth, somHeight); } private void iterationStep(final ExecutorService execServ, final int i, final DataSet dataSet, final double nbrRange, final double nbrRangeSqrd, final Vec scratch, final double learnRate) { Vec input_i = dataSet.getDataPoint(i).getNumericalValues(); PairedReturn<Integer, Integer> closestBMUPR = getBMU(input_i); int xBest = closestBMUPR.getFirstItem(); int yBest = closestBMUPR.getSecondItem(); //The bounding square of values that need to be updated int xStart = Math.max((int)(xBest - nbrRange)-1, 0); int yStart = Math.max((int)(yBest - nbrRange)-1, 0); int xEnd = Math.min((int)(xBest + nbrRange)+1, somWidth); int yEnd = Math.min((int)(yBest + nbrRange)+1, somHeight); for(int x = xStart; x < xEnd; x++) { Vec[] weights_x = weights[x]; for(int y = yStart; y < yEnd; y++) { int xLength = xBest - x; int yLength = yBest - y; int pointDistSqrd = xLength*xLength + yLength*yLength; if(pointDistSqrd < nbrRangeSqrd)//point is in the circle range, { double distWeight = kf.k(sqrt(pointDistSqrd)/nbrRange); Vec weights_xy = weights_x[y]; if(execServ == null) updateWeight(input_i, scratch, weights_xy, distWeight*learnRate); else weightUpdates.get(x).get(y).add(dataSet.getDataPoint(i)); } } } } private List<VecPaired<Vec, Integer>> setUpVectorCollection(boolean parallel) { List<VecPaired<Vec, Integer>> vecList = new ArrayList<>(somWidth*somHeight); for(int i = 0; i < weights.length; i++) for(int j = 0; j < weights[i].length; j++) vecList.add(new VecPaired<>(weights[i][j], vecList.size())); vcCollection.build(parallel, vecList, dm); return vecList; } private void updateWeight(Vec input_i, Vec scratch, Vec weightVec, double scale) { input_i.copyTo(scratch); scratch.mutableSubtract(weightVec); 
weightVec.mutableAdd(scale, scratch); } /** * Finds the Best Matching Unit * @param numericalValues the vector to find hte BMU of * @return the BMU of the given vector */ private PairedReturn<Integer, Integer> getBMU(Vec numericalValues) { double bestDist = Double.MAX_VALUE; int x = -1, y = -1; for(int i = 0; i < weights.length; i++) { Vec[] weights_i = weights[i]; for(int j = 0; j < weights[i].length; j++) { double dist = dm.dist(weights_i[j], numericalValues); if(dist < bestDist) { bestDist =dist; x = i; y = j; } } } return new PairedReturn<>(x, y); } private void trainSOM(final DataSet dataSet, boolean parallel) throws InterruptedException { ExecutorService execServ = ParallelUtils.getNewExecutor(parallel); final int D = dataSet.getNumNumericalVars(); weights = new Vec[somHeight][somWidth]; double neighborRadius = intitalizeWeights(D); Random rand = RandomUtil.getRandom(); Vec scratch = new DenseVector(D); /** * this array is used to access the data in a random order to improve convergence */ final int[] pointAccessOrder = new int[dataSet.size()]; for(int i = 0; i < pointAccessOrder.length; i++) pointAccessOrder[i] = i; final ThreadLocal<Vec> localScratch1; final ThreadLocal<Vec> localScratch2; if(execServ != null)//Create parallel structures { weightUpdates = new ArrayList<>(somHeight); for(int i = 0; i < somHeight; i++) { ArrayList<List<DataPoint>> subList = new ArrayList<>(somWidth); weightUpdates.add(subList); for(int j = 0; j < somWidth; j++) { subList.add(Collections.synchronizedList(new ArrayList<>())); } } localScratch1 = new ThreadLocal<Vec>() { @Override protected Vec initialValue() { return new DenseVector(D); } }; localScratch2 = new ThreadLocal<Vec>() { @Override protected Vec initialValue() { return new DenseVector(D); } }; } else localScratch2 = localScratch1 = null; for(int iter = 0; iter < maxIters; iter++) { final double nbrRange = neighborDecay.rate(iter, maxIters, neighborRadius); final double nbrRangeSqrd = nbrRange*nbrRange; final double learnRate = learningDecay.rate(iter, maxIters, initialLearningRate); //Set up before data loop. Shuffle for better convergence if single threaded, create result queus for paralllel collection if(execServ == null) ArrayUtils.shuffle(pointAccessOrder, rand); else//Prep parallel structures { for(int i = 0; i < somHeight; i++) for(int j = 0; j < somWidth; j++) weightUpdates.get(i).get(j).clear(); } //Performe main loop over all data points if(execServ == null) for(int ir = 0; ir <pointAccessOrder.length; ir++) { iterationStep(execServ, pointAccessOrder[ir], dataSet, nbrRange, nbrRangeSqrd, scratch, learnRate); } else//parallel { int pos = 0; final int size = dataSet.size() / SystemInfo.LogicalCores; int extra = dataSet.size() % SystemInfo.LogicalCores; final CountDownLatch cdl = new CountDownLatch(SystemInfo.LogicalCores); while(pos < dataSet.size()) { final int to = (extra-- > 0 ? 
1 : 0) + pos + size; final int start = pos; pos = to; execServ.submit(new Runnable() { @Override public void run() { for(int i = start; i < to; i++) iterationStep(execServ, i, dataSet, nbrRange, nbrRangeSqrd, localScratch1.get(), learnRate); cdl.countDown(); } }); } cdl.await(); } //Collect results if we did parallel computation if(execServ != null)//Apply changes { final CountDownLatch cdl = new CountDownLatch(somHeight*somWidth); for(int i = 0; i < somHeight; i++) for(int j = 0; j < somWidth; j++) { final List<DataPoint> dataList = weightUpdates.get(i).get(j); final int x = i, y = j; execServ.submit(new Runnable() { @Override public void run() { Vec mean = localScratch1.get(); mean.zeroOut(); double denom = 0.0; for(DataPoint dp : dataList) { //TODO, fix by just re-writing old and poorly done SOM class denom += 1; mean.mutableAdd(1, dp.getNumericalValues()); } if(denom > 0) mean.mutableDivide(denom); updateWeight(mean, localScratch2.get(), weights[x][y], learnRate); cdl.countDown(); } }); } cdl.await(); } } execServ.shutdownNow(); } @Override public CategoricalResults classify(DataPoint data) { if(crWeightPairs == null) throw new UntrainedModelException(); return crWeightPairs[vcCollection.search(data.getNumericalValues(), 1).get(0).getVector().getPair()]; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { try { trainSOM(dataSet, parallel); List<VecPaired<Vec, Integer>> vecList = setUpVectorCollection(parallel); crWeightPairs = new CategoricalResults[vecList.size()]; for(int i = 0; i < crWeightPairs.length; i++) crWeightPairs[i] = new CategoricalResults(dataSet.getClassSize()); for(int i = 0; i < dataSet.size(); i++) { DataPoint dp = dataSet.getDataPoint(i); //Single nearest neighbor is the BMU VecPaired<Vec, Integer> vpBMU = vcCollection.search(dp.getNumericalValues(), 1).get(0).getVector(); int index = vpBMU.getPair(); crWeightPairs[index].incProb(dataSet.getDataPointCategory(i), dataSet.getWeight(i)); } for(int i = 0; i < crWeightPairs.length; i++) crWeightPairs[i].normalize(); } catch (InterruptedException ex) { Logger.getLogger(SOM.class.getName()).log(Level.SEVERE, null, ex); } } @Override public boolean supportsWeightedData() { return true; } @Override public SOM clone() { SOM clone = new SOM(maxIters, kf, initialLearningRate, learningDecay, neighborDecay, dm.clone(), somHeight, somHeight, vcCollection.clone()); if(this.weights != null) { clone.weights = new Vec[this.weights.length][this.weights[0].length]; for(int i = 0; i < this.weights.length; i++) for(int j = 0; j < this.weights[i].length; j++) clone.weights[i][j] = this.weights[i][j].clone(); } if(this.vcCollection != null) clone.vcCollection = this.vcCollection.clone(); if(this.crWeightPairs != null) { clone.crWeightPairs = new CategoricalResults[this.crWeightPairs.length]; for(int i = 0; i < this.crWeightPairs.length; i++) clone.crWeightPairs[i] = this.crWeightPairs[i].clone(); } return clone; } }
19,934
34.789946
232
java
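The heart of an SOM iteration in SOM.java above is: find the best matching unit (BMU) for an input, then pull every lattice neuron inside the neighborhood radius toward that input, weighted by the learning rate and a kernel of the lattice distance. The single-threaded sketch below shows those two steps; it uses a Gaussian neighborhood kernel purely for simplicity (the class itself defaults to an Epanechnikov kernel), and all names are illustrative rather than JSAT API.

import java.util.Random;

/** One SOM update step: locate the BMU, then move nearby lattice neurons toward the input. */
public class SomUpdateSketch
{
    static double dist(double[] a, double[] b)
    {
        double s = 0;
        for (int i = 0; i < a.length; i++)
            s += (a[i] - b[i]) * (a[i] - b[i]);
        return Math.sqrt(s);
    }

    public static void main(String[] args)
    {
        int height = 4, width = 4, d = 2;
        Random rand = new Random(1);
        double[][][] weights = new double[height][width][d];
        for (double[][] row : weights)
            for (double[] w : row)
                for (int k = 0; k < d; k++)
                    w[k] = rand.nextDouble(); // random initialization of the lattice

        double[] input = {0.9, 0.1};
        double learnRate = 0.1, nbrRange = 2.0;

        // 1) find the Best Matching Unit: the lattice cell whose weight vector is closest to the input
        int bx = 0, by = 0;
        double best = Double.POSITIVE_INFINITY;
        for (int x = 0; x < height; x++)
            for (int y = 0; y < width; y++)
            {
                double dxy = dist(weights[x][y], input);
                if (dxy < best) { best = dxy; bx = x; by = y; }
            }

        // 2) pull every neuron within the neighborhood radius toward the input,
        //    weighted by a kernel of its lattice distance to the BMU
        for (int x = 0; x < height; x++)
            for (int y = 0; y < width; y++)
            {
                double latticeDistSqrd = (x - bx) * (x - bx) + (y - by) * (y - by);
                if (latticeDistSqrd >= nbrRange * nbrRange)
                    continue;
                double k = Math.exp(-latticeDistSqrd / (2 * nbrRange * nbrRange)); // Gaussian neighborhood weight
                for (int i = 0; i < d; i++)
                    weights[x][y][i] += learnRate * k * (input[i] - weights[x][y][i]);
            }

        System.out.printf("BMU is (%d,%d), now at [%.3f, %.3f]%n", bx, by, weights[bx][by][0], weights[bx][by][1]);
    }
}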
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/activations/ActivationLayer.java
package jsat.classifiers.neuralnetwork.activations; import java.io.Serializable; import jsat.linear.Matrix; import jsat.linear.Vec; /** * This interface defines a type of activation layer for use in a Neural Network * @author Edward Raff */ public interface ActivationLayer extends Serializable { /** * Computes the activation function of this layer on the given input. * @param input the raw input to compute the activation for * @param output the location to store the activation in */ public void activate(Vec input, Vec output); /** * Computes the activation function of this layer on the given input. * @param input the raw input to compute the activation for * @param output the location to store the activation in * @param rowMajor {@code true} if the information per input is stored in * rows, {@code false} if the inputs were stored by column. This parameter * does not indicate if the matrices themselves are backed by a row or * column major implementation */ public void activate(Matrix input, Matrix output, boolean rowMajor); /** * This method computes the backpropagated error to a given layer. Often * denoted as &delta;<sup>l</sup> = w<sup><small>l+1</small> <b>T</b></sup> * &delta;<sup>l+1</sup> &otimes; &part; f(x<sup>l</sup>), where &otimes; is * the Hadamard product and &part; f(x<sup>l</sup>) is the derivative of * this activation function on the input that was fed into this activation. * <br> * {@code delta_partial} and {@code errout} may point to the same vector * object * @param input the input to this layer that was fed in to be activated * @param output the activation that was produced for this layer * @param delta_partial the error assigned to this layer from the above * layer, sans the Hadamard product with the derivative of the layer * activation. Often denoted as w<sup><small>l+1</small> <b>T</b></sup> &delta;<sup>l+1</sup> * @param errout the delta value or error produced for this layer */ public void backprop(Vec input, Vec output, Vec delta_partial, Vec errout); /** * This method computes the backpropagated error to a given layer. Often * denoted as &delta;<sup>l</sup> = w<sup><small>l+1</small> <b>T</b></sup> * &delta;<sup>l+1</sup> &otimes; &part; f(x<sup>l</sup>), where &otimes; is * the Hadamard product and &part; f(x<sup>l</sup>) is the derivative of * this activation function on the input that was fed into this activation. * <br> * {@code delta_partial} and {@code errout} may point to the same vector * object * @param input the input to this layer that was fed in to be activated * @param output the activation that was produced for this layer * @param delta_partial the error assigned to this layer from the above * layer, sans the Hadamard product with the derivative of the layer * activation. Often denoted as w<sup><small>l+1</small> <b>T</b></sup> &delta;<sup>l+1</sup> * @param errout the delta value or error produced for this layer * @param rowMajor {@code true} if the information per input is stored in * rows, {@code false} if the inputs were stored by column. This parameter * does not indicate if the matrices themselves are backed by a row or * column major implementation */ public void backprop(Matrix input, Matrix output, Matrix delta_partial, Matrix errout, boolean rowMajor); public ActivationLayer clone(); }
3,606
48.410959
109
java
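The backprop contract in ActivationLayer above boils down to an elementwise (Hadamard) product of the incoming partial delta with the activation's derivative: delta^l = (W^{l+1 T} delta^{l+1}) ⊗ f'(z^l). The sketch below expresses that contract outside of JSAT, plugging in the tanh derivative only as a concrete example; all names are illustrative.

import java.util.function.DoubleUnaryOperator;

/** errout_i = delta_partial_i * f'(z_i): the elementwise rule every ActivationLayer.backprop implements. */
public class BackpropHadamardSketch
{
    static double[] backprop(double[] z, double[] deltaPartial, DoubleUnaryOperator derivative)
    {
        double[] errout = new double[z.length];
        for (int i = 0; i < z.length; i++)
            errout[i] = deltaPartial[i] * derivative.applyAsDouble(z[i]); // Hadamard product with f'(z)
        return errout;
    }

    public static void main(String[] args)
    {
        double[] z = {-1.0, 0.5, 2.0};            // pre-activation values fed into the layer
        double[] deltaPartial = {0.1, -0.2, 0.3}; // W^{l+1 T} delta^{l+1} handed down from the layer above
        // tanh'(z) = 1 - tanh(z)^2, used here purely as a concrete derivative
        double[] delta = backprop(z, deltaPartial, zi -> 1 - Math.tanh(zi) * Math.tanh(zi));
        for (double v : delta)
            System.out.println(v);
    }
}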
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/activations/LinearLayer.java
package jsat.classifiers.neuralnetwork.activations; import jsat.linear.Matrix; import jsat.linear.Vec; /** * * @author Edward Raff */ public class LinearLayer implements ActivationLayer { private static final long serialVersionUID = -4040058095010471379L; @Override public void activate(Vec input, Vec output) { input.copyTo(output); } @Override public void activate(Matrix input, Matrix output, boolean rowMajor) { input.copyTo(output); } @Override public void backprop(Vec input, Vec output, Vec delta_partial, Vec errout) { delta_partial.copyTo(errout); } @Override public void backprop(Matrix input, Matrix output, Matrix delta_partial, Matrix errout, boolean rowMajor) { delta_partial.copyTo(errout); } @Override public LinearLayer clone() { return new LinearLayer(); } }
916
18.510638
108
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/activations/ReLU.java
package jsat.classifiers.neuralnetwork.activations; import jsat.linear.Matrix; import jsat.linear.Vec; /** * This Activation Layer is for <b>Re</b>ctified <b>L</b>inear <b>U</b>nits. A * ReLU activation is simply f(x) = max(0, x), and is thus very fast to compute. * <br> * See: Nair, V., &amp; Hinton, G. E. (2010). <i>Rectified Linear Units Improve * Restricted Boltzmann Machines</i>. Proceedings of the 27th International * Conference on Machine Learning, 807–814. * @author Edward Raff */ public class ReLU implements ActivationLayer { private static final long serialVersionUID = -6691240473485759789L; @Override public void activate(Vec input, Vec output) { for(int i = 0; i < input.length(); i++) output.set(i, Math.max(0, input.get(i))); } @Override public void activate(Matrix input, Matrix output, boolean rowMajor) { for(int i = 0; i < input.rows(); i++) for(int j = 0; j < input.cols(); j++) output.set(i, j, Math.max(0, input.get(i, j))); } @Override public void backprop(Vec input, Vec output, Vec delta_partial, Vec errout) { for(int i = 0; i < input.length(); i++) { double out_i = output.get(i); if(out_i != 0) errout.set(i, delta_partial.get(i)); else errout.set(i, 0.0); } } @Override public void backprop(Matrix input, Matrix output, Matrix delta_partial, Matrix errout, boolean rowMajor) { for (int i = 0; i < input.rows(); i++) for (int j = 0; j < input.cols(); j++) if (output.get(i, j) != 0) errout.set(i, j, delta_partial.get(i, j)); else errout.set(i, j, 0.0); } @Override public ReLU clone() { return new ReLU(); } }
1,899
27.358209
108
java
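ReLU.backprop above passes the gradient through only where the activation is non-zero. A tiny standalone illustration of that gating, with illustrative (non-JSAT) names:

/** ReLU forward pass and its gated backward pass over a small vector. */
public class ReluSketch
{
    public static void main(String[] args)
    {
        double[] input = {-2.0, 0.0, 3.0};
        double[] deltaPartial = {0.5, 0.5, 0.5};

        double[] output = new double[input.length];
        double[] errout = new double[input.length];
        for (int i = 0; i < input.length; i++)
        {
            output[i] = Math.max(0, input[i]);                // forward: f(x) = max(0, x)
            errout[i] = output[i] != 0 ? deltaPartial[i] : 0; // backward: gradient gated by the activation
        }
        for (int i = 0; i < input.length; i++)
            System.out.printf("x=%+.1f -> a=%.1f, delta=%.1f%n", input[i], output[i], errout[i]);
    }
}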
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/activations/SigmoidLayer.java
package jsat.classifiers.neuralnetwork.activations; import jsat.linear.Matrix; import jsat.linear.Vec; /** * This layer provides the standard Sigmoid activation f(x) = * 1/(1+exp(-x)) * * @author Edward Raff */ public class SigmoidLayer implements ActivationLayer { private static final long serialVersionUID = 160273287445169627L; @Override public void activate(Vec input, Vec output) { for(int i = 0; i < input.length(); i++) output.set(i, 1/(1+Math.exp(-input.get(i)))); } @Override public void activate(Matrix input, Matrix output, boolean rowMajor) { for(int i = 0; i < input.rows(); i++) for(int j = 0; j < input.cols(); j++) output.set(i, j, 1.0/(1+Math.exp(-input.get(i, j)))); } @Override public void backprop(Vec input, Vec output, Vec delta_partial, Vec errout) { for(int i = 0; i < input.length(); i++) { double out_i = output.get(i); double errin_i = delta_partial.get(i); errout.set(i, out_i*(1-out_i)*errin_i); } } @Override public void backprop(Matrix input, Matrix output, Matrix delta_partial, Matrix errout, boolean rowMajor) { for(int i = 0; i < input.rows(); i++) for(int j = 0; j < input.cols(); j++) { double out_ij = output.get(i, j); double errin_ij = delta_partial.get(i, j); errout.set(i, j, out_ij*(1-out_ij)*errin_ij); } } @Override public SigmoidLayer clone() { return new SigmoidLayer(); } }
1,647
24.353846
108
java
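The factor out*(1-out) used in SigmoidLayer.backprop above is the derivative of the logistic function written in terms of its own output, which is why the layer never has to re-evaluate the exponential. A quick standalone check against a central finite difference, using illustrative names:

/** Confirms numerically that sigmoid'(x) == sigmoid(x) * (1 - sigmoid(x)). */
public class SigmoidDerivativeSketch
{
    static double sigmoid(double x) { return 1 / (1 + Math.exp(-x)); }

    public static void main(String[] args)
    {
        double x = 0.7, h = 1e-6;
        double out = sigmoid(x);
        double analytic = out * (1 - out);                            // the factor the layer multiplies into the delta
        double numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h); // central finite difference
        System.out.printf("analytic=%.8f numeric=%.8f%n", analytic, numeric);
    }
}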
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/activations/SoftSignLayer.java
package jsat.classifiers.neuralnetwork.activations; import jsat.linear.Matrix; import jsat.linear.Vec; /** * This provides the Soft Sign activation function f(x) = x/(1+abs(x)), which is * similar to the {@link TanhLayer tanh} activation and has a min/max of -1 and * 1. However it is significantly faster to compute. <br> * <br> * See: Glorot, X., &amp; Bengio, Y. (2010). <i>Understanding the difficulty of * training deep feedforward neural networks</i>. Journal of Machine Learning * Research - Proceedings Track, 9, 249–256. Retrieved from * <a href="http://jmlr.csail.mit.edu/proceedings/papers/v9/glorot10a/glorot10a.pdf"> * here</a> * @author Edward Raff */ public class SoftSignLayer implements ActivationLayer { private static final long serialVersionUID = 9137125423044227288L; @Override public void activate(Vec input, Vec output) { for(int i = 0; i < input.length(); i++) { double in_i = input.get(i); output.set(i, in_i/(1.0+Math.abs(in_i))); } } @Override public void activate(Matrix input, Matrix output, boolean rowMajor) { for(int i = 0; i < input.rows(); i++) for(int j = 0; j < input.cols(); j++) { double in_ij = input.get(i, j); output.set(i, j, in_ij/(1.0+Math.abs(in_ij))); } } @Override public void backprop(Vec input, Vec output, Vec delta_partial, Vec errout) { for(int i = 0; i < input.length(); i++) { double tmp_i = (1-Math.abs(output.get(i))); double errin_i = delta_partial.get(i); errout.set(i, tmp_i*tmp_i*errin_i); } } @Override public void backprop(Matrix input, Matrix output, Matrix delta_partial, Matrix errout, boolean rowMajor) { for(int i = 0; i < input.rows(); i++) for (int j = 0; j < input.cols(); j++) { double tmp_ij = (1 - Math.abs(output.get(i, j))); double errin_ij = delta_partial.get(i, j); errout.set(i, j, tmp_ij*tmp_ij*errin_ij); } } @Override public SoftSignLayer clone() { return new SoftSignLayer(); } }
2,257
29.106667
108
java
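SoftSignLayer.backprop above computes the derivative from the stored output as (1 - |f(x)|)^2, which equals the closed form 1/(1 + |x|)^2. A small standalone check of that identity, with illustrative names:

/** Verifies (1 - |softsign(x)|)^2 == 1/(1+|x|)^2 == softsign'(x). */
public class SoftSignDerivativeSketch
{
    static double softsign(double x) { return x / (1 + Math.abs(x)); }

    public static void main(String[] args)
    {
        double x = -1.3, h = 1e-6;
        double out = softsign(x);
        double fromOutput = (1 - Math.abs(out)) * (1 - Math.abs(out));  // form the layer uses, needs only the output
        double closedForm = 1 / ((1 + Math.abs(x)) * (1 + Math.abs(x)));
        double numeric = (softsign(x + h) - softsign(x - h)) / (2 * h); // central finite difference
        System.out.printf("fromOutput=%.8f closedForm=%.8f numeric=%.8f%n", fromOutput, closedForm, numeric);
    }
}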
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/activations/SoftmaxLayer.java
package jsat.classifiers.neuralnetwork.activations; import jsat.linear.Matrix; import jsat.linear.Vec; import jsat.math.MathTricks; /** * This activation layer is meant to be used as the top-most layer for * classification problems, and uses the softmax function (also known as the * normalized exponential) to convert the inputs into probabilities. * * @author Edward Raff */ public class SoftmaxLayer implements ActivationLayer { private static final long serialVersionUID = -6595701781466123463L; @Override public void activate(Vec input, Vec output) { input.copyTo(output); MathTricks.softmax(output, false); } @Override public void backprop(Vec input, Vec output, Vec delta_partial, Vec errout) { if(delta_partial != errout)//if the same object, nothing to do delta_partial.copyTo(errout); } @Override public void activate(Matrix input, Matrix output, boolean rowMajor) { if(rowMajor)//easy for(int i = 0; i < input.rows(); i++) activate(input.getRowView(i), output.getRowView(i)); else//TODO, do this more efficiently for(int j = 0; j < input.cols(); j++) activate(input.getColumnView(j), output.getColumnView(j)); } @Override public void backprop(Matrix input, Matrix output, Matrix delta_partial, Matrix errout, boolean rowMajor) { if(delta_partial != errout)//if the same object, nothing to do delta_partial.copyTo(errout); } @Override public SoftmaxLayer clone() { return new SoftmaxLayer(); } }
1,629
26.627119
108
java
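SoftmaxLayer above delegates the actual computation to MathTricks.softmax; the sketch below is an independent illustration of a numerically stable softmax (subtracting the maximum before exponentiating) and is not a claim about how MathTricks implements it. Names are illustrative.

import java.util.Arrays;

/** Numerically stable softmax: exp(z_i - max) / sum_j exp(z_j - max). */
public class SoftmaxSketch
{
    static double[] softmax(double[] z)
    {
        double max = Double.NEGATIVE_INFINITY;
        for (double v : z)
            max = Math.max(max, v);
        double[] out = new double[z.length];
        double sum = 0;
        for (int i = 0; i < z.length; i++)
        {
            out[i] = Math.exp(z[i] - max); // subtracting the max avoids overflow without changing the result
            sum += out[i];
        }
        for (int i = 0; i < out.length; i++)
            out[i] /= sum; // the outputs now form a probability distribution
        return out;
    }

    public static void main(String[] args)
    {
        System.out.println(Arrays.toString(softmax(new double[]{1.0, 2.0, 3.0})));
    }
}

The pass-through backprop in SoftmaxLayer (copying delta_partial straight to errout) is the usual simplification that applies when the softmax output feeds a cross-entropy style loss, where the combined derivative reduces to prediction minus target.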
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/activations/TanhLayer.java
package jsat.classifiers.neuralnetwork.activations; import jsat.linear.Matrix; import jsat.linear.Vec; /** * This layer provides the standard tanh activation f(x) = * tanh(x) * * @author Edward Raff */ public class TanhLayer implements ActivationLayer { private static final long serialVersionUID = -8369008344962638121L; @Override public void activate(Vec input, Vec output) { for(int i = 0; i < input.length(); i++) output.set(i, Math.tanh(input.get(i))); } @Override public void activate(Matrix input, Matrix output, boolean rowMajor) { for(int i = 0; i < input.rows(); i++) for (int j = 0; j < input.cols(); j++) output.set(i, j, Math.tanh(input.get(i, j))); } @Override public void backprop(Vec input, Vec output, Vec delta_partial, Vec errout) { for(int i = 0; i < input.length(); i++) { double out_i = output.get(i); double errin_i = delta_partial.get(i); errout.set(i, (1-out_i*out_i)*errin_i); } } @Override public void backprop(Matrix input, Matrix output, Matrix delta_partial, Matrix errout, boolean rowMajor) { for(int i = 0; i < input.rows(); i++) for (int j = 0; j < input.cols(); j++) { double out_ij = output.get(i, j); double errin_ij = delta_partial.get(i, j); errout.set(i, j, (1-out_ij*out_ij)*errin_ij); } } @Override public TanhLayer clone() { return new TanhLayer(); } }
1,613
24.619048
108
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/initializers/BiastInitializer.java
package jsat.classifiers.neuralnetwork.initializers; import java.io.Serializable; import java.util.Random; import jsat.linear.Vec; /** * This interface specifies the method of initializing the bias connections in a * neural network. * @author Edward Raff */ public interface BiastInitializer extends Serializable { /** * Performs the initialization of the given vector of bias values * @param b the vector to store the biases in * @param fanIn the number of connections coming into the layer that these * biases are for. * @param rand the source of randomness for initialization */ public void init(Vec b, int fanIn, Random rand); public BiastInitializer clone(); }
723
27.96
80
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/initializers/ConstantInit.java
package jsat.classifiers.neuralnetwork.initializers; import java.util.Random; import jsat.linear.ConstantVector; import jsat.linear.Vec; /** * This initializes all bias values to a single constant value * @author Edward Raff */ public class ConstantInit implements BiastInitializer { private static final long serialVersionUID = 2638413936718283757L; private double c; /** * * @param c the constant to set all biases to */ public ConstantInit(double c) { this.c = c; } /** * * @param c the constant value to use */ public void setConstant(double c) { if(Double.isNaN(c) || Double.isInfinite(c)) throw new IllegalArgumentException("Constant must be a real value, not " + c); this.c = c; } /** * * @return the constant value that will be used for initialization */ public double getConstant() { return c; } @Override public void init(Vec b, int fanIn, Random rand) { new ConstantVector(c, b.length()).copyTo(b); } @Override public ConstantInit clone() { return new ConstantInit(c); } }
1,194
19.254237
90
java
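As a quick sketch of the BiastInitializer contract that the ConstantInit class above implements (hypothetical sizes; this particular implementation ignores the fanIn and Random arguments):

import java.util.Random;
import jsat.classifiers.neuralnetwork.initializers.ConstantInit;
import jsat.linear.DenseVector;
import jsat.linear.Vec;

public class ConstantInitExample
{
    public static void main(String[] args)
    {
        Vec bias = new DenseVector(10);                         // biases for a layer of 10 neurons
        new ConstantInit(0.1).init(bias, 784, new Random(42));  // fanIn of 784 is ignored here
        // every entry of bias is now 0.1
    }
}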
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/initializers/GaussianNormalInit.java
package jsat.classifiers.neuralnetwork.initializers;

import java.util.Random;
import jsat.linear.Matrix;
import jsat.linear.Vec;

/**
 * This object initializes the values of weights by sampling from the zero mean
 * Gaussian
 * @author Edward Raff
 */
public class GaussianNormalInit implements WeightInitializer, BiastInitializer
{
    private static final long serialVersionUID = -882418891606717433L;
    private double stndDev;

    /**
     * Creates a new GaussianNormalInit object for initializing weights
     * @param stndDev the standard deviation of the distribution to sample from
     */
    public GaussianNormalInit(double stndDev)
    {
        this.stndDev = stndDev;
    }

    /**
     * Sets the standard deviation of the distribution that will be sampled from
     * @param stndDev the standard deviation to use
     */
    public void setStndDev(double stndDev)
    {
        this.stndDev = stndDev;
    }

    /**
     *
     * @return the standard deviation of the Gaussian that is sampled from
     */
    public double getStndDev()
    {
        return stndDev;
    }

    @Override
    public void init(Matrix w, Random rand)
    {
        for(int i = 0; i < w.rows(); i++)
            for(int j = 0; j < w.cols(); j++)
                w.set(i, j, rand.nextGaussian()*stndDev);
    }

    @Override
    public void init(Vec b, int fanIn, Random rand)
    {
        for(int i = 0; i < b.length(); i++)
            b.set(i, rand.nextGaussian()*stndDev);
    }

    @Override
    public GaussianNormalInit clone()
    {
        return new GaussianNormalInit(stndDev);
    }
}
1,608
23.014925
80
java
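A short sketch of initializing a bias vector with the GaussianNormalInit class above (the standard deviation and sizes are hypothetical; the Matrix overload fills each entry the same way):

import java.util.Random;
import jsat.classifiers.neuralnetwork.initializers.GaussianNormalInit;
import jsat.linear.DenseVector;
import jsat.linear.Vec;

public class GaussianInitExample
{
    public static void main(String[] args)
    {
        GaussianNormalInit init = new GaussianNormalInit(0.01); // draws from N(0, 0.01^2)
        Vec bias = new DenseVector(100);
        init.init(bias, 300, new Random(123)); // fanIn is not used by this initializer
    }
}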
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/initializers/TanhInitializer.java
package jsat.classifiers.neuralnetwork.initializers;

import java.util.Random;
import jsat.linear.Matrix;
import jsat.linear.Vec;

/**
 * This initializer samples the weights from an adjusted uniform distribution
 * in order to provide better behavior of neuron activation and gradients<br>
 * <br>
 * See: Glorot, X., &amp; Bengio, Y. (2010). <i>Understanding the difficulty of
 * training deep feedforward neural networks</i>. Journal of Machine Learning
 * Research - Proceedings Track, 9, 249–256. Retrieved from
 * <a href="http://jmlr.csail.mit.edu/proceedings/papers/v9/glorot10a/glorot10a.pdf">
 * here</a>
 * @author Edward Raff
 */
public class TanhInitializer implements WeightInitializer, BiastInitializer
{
    private static final long serialVersionUID = -4770682311082616208L;

    @Override
    public void init(Matrix w, Random rand)
    {
        double cnt = Math.sqrt(6)/Math.sqrt(w.rows()+w.cols());
        for(int i = 0; i < w.rows(); i++)
            for(int j = 0; j < w.cols(); j++)
                w.set(i, j, rand.nextDouble()*cnt*2-cnt);
    }

    @Override
    public void init(Vec b, int fanIn, Random rand)
    {
        double cnt = Math.sqrt(6)/Math.sqrt(b.length()+fanIn);
        for(int i = 0; i < b.length(); i++)
            b.set(i, rand.nextDouble()*cnt*2-cnt);
    }

    @Override
    public TanhInitializer clone()
    {
        return new TanhInitializer();
    }
}
1,427
28.142857
85
java
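A sketch of the TanhInitializer above applied to a bias vector. Per the code, values are drawn uniformly from [-c, c] with c = sqrt(6)/sqrt(length + fanIn); the sizes below are hypothetical.

import java.util.Random;
import jsat.classifiers.neuralnetwork.initializers.TanhInitializer;
import jsat.linear.DenseVector;
import jsat.linear.Vec;

public class TanhInitExample
{
    public static void main(String[] args)
    {
        Vec bias = new DenseVector(50);   // 50 neurons in this layer
        int fanIn = 200;                  // hypothetical number of incoming connections
        new TanhInitializer().init(bias, fanIn, new Random(7));
        // each value lies in [-c, c] with c = sqrt(6)/sqrt(50 + 200), roughly 0.155
    }
}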
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/initializers/WeightInitializer.java
package jsat.classifiers.neuralnetwork.initializers;

import java.io.Serializable;
import java.util.Random;
import jsat.linear.Matrix;

/**
 * This interface specifies the method of initializing the weight connections in
 * a neural network.
 *
 * @author Edward Raff
 */
public interface WeightInitializer extends Serializable
{
    /**
     * Initializes the values of the given weight matrix
     * @param w the matrix to initialize
     * @param rand the source of randomness for the initialization
     */
    public void init(Matrix w, Random rand);

    public WeightInitializer clone();
}
605
23.24
80
java
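A minimal, hypothetical implementation of the WeightInitializer interface above, included only to illustrate the contract; this class is not part of JSAT.

import java.util.Random;
import jsat.classifiers.neuralnetwork.initializers.WeightInitializer;
import jsat.linear.Matrix;

public class UniformInit implements WeightInitializer
{
    private static final long serialVersionUID = 1L;
    private final double scale;

    public UniformInit(double scale)
    {
        this.scale = scale;
    }

    @Override
    public void init(Matrix w, Random rand)
    {
        // fill every weight with a uniform value in [-scale, scale]
        for (int i = 0; i < w.rows(); i++)
            for (int j = 0; j < w.cols(); j++)
                w.set(i, j, (rand.nextDouble() * 2 - 1) * scale);
    }

    @Override
    public UniformInit clone()
    {
        return new UniformInit(scale);
    }
}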
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/regularizers/Max2NormRegularizer.java
package jsat.classifiers.neuralnetwork.regularizers; import java.util.ArrayList; import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.logging.Level; import java.util.logging.Logger; import jsat.linear.Matrix; import jsat.linear.Vec; /** * This regularizer restricts the norm of each neuron's weights to be bounded by * a fixed constant, and rescaled when the norm is exceeded. * * @author Edward Raff */ public class Max2NormRegularizer implements WeightRegularizer { private static final long serialVersionUID = 1989826758516880355L; private double maxNorm; public Max2NormRegularizer(double maxNorm) { setMaxNorm(maxNorm); } /** * Sets the maximum allowed 2 norm for a single neuron's weights * @param maxNorm the maximum norm per neuron's weights */ public void setMaxNorm(double maxNorm) { if(Double.isNaN(maxNorm) || Double.isInfinite(maxNorm) || maxNorm <= 0) throw new IllegalArgumentException("The maximum norm must be a positive constant, not " + maxNorm); this.maxNorm = maxNorm; } /** * * @return the maximum allowed 2 norm for a single neuron's weights */ public double getMaxNorm() { return maxNorm; } @Override public void applyRegularization(Matrix W, Vec b) { for (int i = 0; i < W.rows(); i++) { Vec W_li = W.getRowView(i); double norm = W_li.pNorm(2); if (norm >= maxNorm) { W_li.mutableMultiply(maxNorm / norm); double oldB_i = b.get(i); b.set(i, oldB_i * maxNorm / norm); } } } @Override public void applyRegularization(final Matrix W, final Vec b, ExecutorService ex) { List<Future<?>> futures = new ArrayList<Future<?>>(W.rows()); for (int indx = 0; indx < W.rows(); indx++) { final int i = indx; futures.add(ex.submit(new Runnable() { @Override public void run() { Vec W_li = W.getRowView(i); double norm = W_li.pNorm(2); if (norm >= maxNorm) { W_li.mutableMultiply(maxNorm / norm); double oldB_i = b.get(i); b.set(i, oldB_i * maxNorm / norm); } } })); } try { for (Future<?> future : futures) future.get(); } catch (InterruptedException ex1) { Logger.getLogger(Max2NormRegularizer.class.getName()).log(Level.SEVERE, null, ex1); } catch (ExecutionException ex1) { Logger.getLogger(Max2NormRegularizer.class.getName()).log(Level.SEVERE, null, ex1); } } @Override public double applyRegularizationToRow(Vec w, double b) { double norm = w.pNorm(2); if (norm >= maxNorm) { w.mutableMultiply(maxNorm / norm); return b * maxNorm / norm; } return b; } @Override public Max2NormRegularizer clone() { return new Max2NormRegularizer(maxNorm); } }
3,445
26.349206
111
java
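A tiny sketch (hypothetical numbers, example class not part of JSAT) of the per-row form of the max-norm constraint implemented by Max2NormRegularizer above: a weight vector whose 2-norm exceeds the limit is rescaled onto the ball of radius maxNorm, and the returned bias is scaled by the same factor.

import jsat.classifiers.neuralnetwork.regularizers.Max2NormRegularizer;
import jsat.linear.DenseVector;
import jsat.linear.Vec;

public class MaxNormExample
{
    public static void main(String[] args)
    {
        Max2NormRegularizer reg = new Max2NormRegularizer(1.0);

        Vec w = new DenseVector(2);
        w.set(0, 3.0);
        w.set(1, 4.0);                              // ||w||_2 = 5, which exceeds the limit of 1
        double b = reg.applyRegularizationToRow(w, 2.0);
        // w is now (0.6, 0.8) and b is 2.0 * (1/5) = 0.4
    }
}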
JSAT
JSAT-master/JSAT/src/jsat/classifiers/neuralnetwork/regularizers/WeightRegularizer.java
package jsat.classifiers.neuralnetwork.regularizers;

import java.io.Serializable;
import java.util.concurrent.ExecutorService;
import jsat.linear.Matrix;
import jsat.linear.Vec;

/**
 * This interface defines the contract for applying a regularization scheme to
 * the weight and bias values of a layer in a neural network.
 *
 * @author Edward Raff
 */
public interface WeightRegularizer extends Serializable
{
    /**
     * Applies regularization to one matrix, where the rows of the matrix
     * correspond to the weights associated with one neuron's input. The vector of
     * bias terms must then have the same length as the number of rows in the
     * given matrix.
     * @param W the matrix to apply regularization to
     * @param b the vector of bias terms to apply regularization to
     */
    public void applyRegularization(Matrix W, Vec b);

    /**
     * Applies regularization to one matrix, where the rows of the matrix
     * correspond to the weights associated with one neuron's input. The vector of
     * bias terms must then have the same length as the number of rows in the
     * given matrix.
     * @param W the matrix to apply regularization to
     * @param b the vector of bias terms to apply regularization to
     * @param ex the source of threads for parallel computation
     */
    public void applyRegularization(Matrix W, Vec b, ExecutorService ex);

    /**
     * Applies the regularization to one row of the weight matrix, where the row
     * corresponds to the weights into one neuron.
     *
     * @param w the weight row to be altered depending on the regularization method
     * @param b the original bias input to this row
     * @return the new bias value, or the same value if no change in the bias has occurred
     */
    public double applyRegularizationToRow(Vec w, double b);

    public WeightRegularizer clone();
}
1,900
37.02
90
java
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/DCD.java
package jsat.classifiers.svm; import java.util.*; import java.util.concurrent.ExecutorService; import jsat.SingleWeightVectorModel; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.DataPoint; import jsat.classifiers.calibration.BinaryScoreClassifier; import static jsat.classifiers.svm.DCDs.eq24; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.regression.RegressionDataSet; import jsat.regression.Regressor; import jsat.utils.IntList; import jsat.utils.ListUtils; import jsat.utils.random.RandomUtil; /** * Implements Dual Coordinate Descent (DCD) training algorithms for a Linear * L<sup>1</sup> or L<sup>2</sup> Support Vector Machine for binary * classification and regression. * NOTE: While this implementation makes use of the dual formulation only the linear * kernel is ever used. The algorithm also uses the primal representation and uses * the explicit formulation of <i>w</i> in training and classification. As such, * the support vectors found are not necessary once training is complete - * and will be discarded. * <br><br> * See: * <ul> * <li> * Hsieh, C.-J., Chang, K.-W., Lin, C.-J., Keerthi, S. S.,&amp;Sundararajan, S. * (2008). <i>A Dual Coordinate Descent Method for Large-scale Linear SVM</i>. * Proceedings of the 25th international conference on Machine learning - ICML * ’08 (pp. 408–415). New York, New York, USA: ACM Press. * doi:10.1145/1390156.1390208 * </li> * <li> * Ho, C.-H.,&amp;Lin, C.-J. (2012). <i>Large-scale Linear Support Vector * Regression</i>. Journal of Machine Learning Research, 13, 3323–3348. * Retrieved from <a href="http://ntu.csie.org/~cjlin/papers/linear-svr.pdf"> * here</a> * </ul> * @author Edward Raff * @see DCDs */ public class DCD implements BinaryScoreClassifier, Regressor, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = -1489225034030922798L; private int maxIterations; private Vec[] vecs; private double[] alpha; private double[] y; private double bias; private Vec w; private double C; private boolean useL1; private boolean onlineVersion = false; private double eps = 0.001; private boolean useBias = true; /** * Creates a new DCDL2 SVM object */ public DCD() { this(10000, false); } /** * Creates a new DCD SVM object. The default C value of 1 is * used as suggested in the original paper. * @param maxIterations the maximum number of training iterations * @param useL1 whether or not to use L1 or L2 form */ public DCD(int maxIterations, boolean useL1) { this(maxIterations, 1, useL1); } /** * Creates a new DCD SVM object * @param maxIterations the maximum number of training iterations * @param C the misclassification penalty * @param useL1 whether or not to use L1 or L2 form */ public DCD(int maxIterations, double C, boolean useL1) { this.maxIterations = maxIterations; this.C = C; this.useL1 = useL1; } /** * By default, Algorithm 1 is used. Algorithm 2 is an "online" version * that updates the dual form by only one data point at a time. This * controls which version is used. * @param onlineVersion <tt>false</tt> to use algorithm 1, <tt>true</tt> * to use algorithm 2 */ public void setOnlineVersion(boolean onlineVersion) { this.onlineVersion = onlineVersion; } /** * Returns whether or not the online version of the algorithm, * algorithm 2 is in use. 
* @return <tt>true</tt> if algorithm 2 is in use, <tt>false</tt> if * algorithm 1 */ public boolean isOnlineVersion() { return onlineVersion; } /** * Sets the {@code eps} used in the epsilon insensitive loss function used * when performing regression. Errors in the output that less than * {@code eps} during training are treated as correct. * <br> * This parameter has no impact on classification problems. * * @param eps the non-negative value to use as the error tolerance in regression */ public void setEps(double eps) { if(Double.isNaN(eps) || eps < 0 || Double.isInfinite(eps)) throw new IllegalArgumentException("eps must be non-negative, not "+eps); this.eps = eps; } /** * Returns the epsilon insensitivity parameter used in regression problems. * @return the epsilon insensitivity parameter used in regression problems. */ public double getEps() { return eps; } /** * Sets the penalty parameter for misclassifications. The recommended value * is 1, and values larger than 4 are not normally needed according to the * original paper. * * @param C the penalty parameter in (0, Inf) */ public void setC(double C) { if(Double.isNaN(C) || Double.isInfinite(C) || C <= 0) throw new ArithmeticException("Penalty parameter must be a positive value, not " + C); this.C = C; } /** * Returns the penalty parameter for misclassifications. * @return the penalty parameter for misclassifications. */ public double getC() { return C; } /** * Determines whether or not to use the L<sup>1</sup> or L<sup>2</sup> SVM * @param useL1 <tt>true</tt> to use the L<sup>1</sup> form, <tt>false</tt> to use the L<sup>2</sup> form. */ public void setUseL1(boolean useL1) { this.useL1 = useL1; } /** * Returns <tt>true</tt> if the L<sup>1</sup> form is in use * @return <tt>true</tt> if the L<sup>1</sup> form is in use */ public boolean isUseL1() { return useL1; } /** * Sets the maximum number of iterations allowed through the whole training * set. * @param maxIterations the maximum number of training epochs */ public void setMaxIterations(int maxIterations) { if(maxIterations <= 0) throw new IllegalArgumentException("Number of iterations must be positive, not " + maxIterations); this.maxIterations = maxIterations; } /** * Returns the maximum number of allowed training epochs * @return the maximum number of allowed training epochs */ public int getMaxIterations() { return maxIterations; } /** * Sets whether or not an implicit bias term should be added to the inputs. * @param useBias {@code true} to add an implicit bias term to inputs, * {@code false} to use the input data as provided. */ public void setUseBias(boolean useBias) { this.useBias = useBias; } /** * Returns {@code true} if an implicit bias term is in use, or {@code false} * if not. * @return {@code true} if an implicit bias term is in use, or {@code false} * if not. 
*/ public boolean isUseBias() { return useBias; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return bias; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public CategoricalResults classify(DataPoint data) { if (w == null) throw new UntrainedModelException("The model has not been trained"); CategoricalResults cr = new CategoricalResults(2); if (getScore(data) < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { return w.dot(dp.getNumericalValues()) + bias; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { if(dataSet.getClassSize() != 2) throw new FailedToFitException("SVM only supports binary classificaiton problems"); vecs = new Vec[dataSet.size()]; alpha = new double[vecs.length]; y = new double[vecs.length]; bias = 0; final double[] Qhs = new double[vecs.length];//Q hats final double U = getU(), D = getD(); for(int i = 0; i < dataSet.size(); i++) { vecs[i] = dataSet.getDataPoint(i).getNumericalValues(); y[i] = dataSet.getDataPointCategory(i)*2-1; Qhs[i] = vecs[i].dot(vecs[i])+D; if(useBias) Qhs[i] += 1.0; } w = new DenseVector(vecs[0].length()); List<Integer> A = new IntList(vecs.length); ListUtils.addRange(A, 0, vecs.length, 1); Random rand = RandomUtil.getRandom(); for(int t = 0; t < maxIterations; t++ ) { if(onlineVersion) { int i = rand.nextInt(vecs.length); performUpdate(i, D, U, Qhs[i]); } else { Collections.shuffle(A, rand); for(int i : A) performUpdate(i, D, U, Qhs[i]); } } } /** * Performs steps a, b, and c of the DCD algorithms 1 and 2 * @param i the index to update * @param D the value of D * @param U the value of U * @param Qh_ii the Q hat value that will be used in this update. 
*/ private void performUpdate(final int i, final double D, final double U, final double Qh_ii) { //a final double G = y[i]*(w.dot(vecs[i])+bias)-1+D*alpha[i]; //b final double PG; if(alpha[i] == 0) PG = Math.min(G, 0); else if(alpha[i] == U) PG = Math.max(G, 0); else PG = G; //c if(PG != 0) { final double alphaOld = alpha[i]; alpha[i] = Math.min(Math.max(alpha[i]-G/Qh_ii, 0), U); final double scale = (alpha[i]-alphaOld)*y[i]; w.mutableAdd(scale, vecs[i]); if(useBias) bias += scale; } } @Override public double regress(DataPoint data) { return w.dot(data.getNumericalValues())+bias; } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { vecs = new Vec[dataSet.size()]; /** * Makes the Beta vector in the Algo 4 description */ alpha = new double[vecs.length]; y = new double[vecs.length]; bias = 0; final double[] Qhs = new double[vecs.length];//Q hats final double U = getU(), lambda = getD(); double v_0 = 0; for(int i = 0; i < dataSet.size(); i++) { vecs[i] = dataSet.getDataPoint(i).getNumericalValues(); y[i] = dataSet.getTargetValue(i); Qhs[i] = vecs[i].dot(vecs[i])+lambda; if (useBias) Qhs[i] += 1.0; v_0 += Math.abs(eq24(0, -y[i]-eps, -y[i]+eps, U)); } w = new DenseVector(vecs[0].length()); IntList activeSet = new IntList(vecs.length); ListUtils.addRange(activeSet, 0, vecs.length, 1); @SuppressWarnings("unused") double M = Double.POSITIVE_INFINITY; for(int iteration = 0; iteration < maxIterations; iteration++) { double maxVk = Double.NEGATIVE_INFINITY; double vKSum = 0; //6.1 Randomly permute T Collections.shuffle(activeSet); //6.2 For i in T Iterator<Integer> iter = activeSet.iterator(); while(iter.hasNext()) { final int i = iter.next(); final double y_i = y[i]; final Vec x_i = vecs[i]; final double wDotX = w.dot(x_i)+bias; final double g = -y_i + wDotX + lambda * alpha[i]; final double gP = g + eps; final double gN = g - eps; final double v_i = eq24(alpha[i], gN, gP, U); maxVk = Math.max(maxVk, v_i); vKSum += Math.abs(v_i); //eq (22) final double Q_ii = Qhs[i]; final double d; if (gP < Q_ii * alpha[i]) d = -gP / Q_ii; else if (gN > Q_ii * alpha[i]) d = -gN / Q_ii; else d = -alpha[i]; if (Math.abs(d) < 1e-14) continue; //s = max(−U, min(U,beta_i +d)) eq (21) final double s = Math.max(-U, Math.min(U, alpha[i]+d)); w.mutableAdd(s-alpha[i], x_i); if(useBias) bias += (s-alpha[i]); alpha[i] = s; } //convergence check if(vKSum/v_0 < 1e-4)//converged break; else M = maxVk; } } private double getU() { if(useL1) return C; else return Double.POSITIVE_INFINITY; } private double getD() { if(useL1) return 0; else return 1/(2*C); } @Override public boolean supportsWeightedData() { return false; } @Override public DCD clone() { DCD clone = new DCD(maxIterations, C, useL1); clone.onlineVersion = this.onlineVersion; clone.bias = this.bias; clone.useBias = this.useBias; if(this.w != null) clone.w = this.w.clone(); return clone; } }
14,690
28.26494
111
java
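A hedged configuration sketch for the DCD learner above. Construction of the ClassificationDataSet is not shown in this collection, so the helper simply assumes a prepared binary data set; the example class and parameter values are hypothetical.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.svm.DCD;

public class DCDExample
{
    // dataSet is assumed to be a prepared two-class problem
    public static DCD trainLinearSvm(ClassificationDataSet dataSet)
    {
        DCD svm = new DCD(10000, 1.0, true); // 10k epochs, C = 1, L1 form
        svm.setUseBias(true);
        svm.setOnlineVersion(false);         // Algorithm 1: full passes in random order
        svm.train(dataSet);
        return svm;
    }

    public static int predict(DCD svm, DataPoint x)
    {
        // a positive score maps to class 1, a negative score to class 0
        return svm.getScore(x) < 0 ? 0 : 1;
    }
}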
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/DCDs.java
package jsat.classifiers.svm; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; import java.util.concurrent.ExecutorService; import jsat.SingleWeightVectorModel; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.regression.*; import jsat.utils.IntList; import jsat.utils.ListUtils; import jsat.utils.random.XORWOW; import java.util.*; import jsat.DataSet; import jsat.distributions.Distribution; import jsat.distributions.Exponential; import jsat.utils.random.RandomUtil; /** * Implements Dual Coordinate Descent with shrinking (DCDs) training algorithms * for a Linear L<sup>1</sup> or L<sup>2</sup> Support Vector Machine for binary * classification and regression. * NOTE: While this implementation makes use of the dual formulation only the linear * kernel is ever used. The algorithm also uses the primal representation and uses * the explicit formulation of <i>w</i> in training and classification. As such, * the support vectors found are not necessary once training is complete - * and will be discarded.<br> * <br> * DCDs man be warm started by other DCDs models trained on the same data set. * <br><br> * See: * <ul> * <li> * Hsieh, C.-J., Chang, K.-W., Lin, C.-J., Keerthi, S. S., &amp; Sundararajan, S. * (2008). <i>A Dual Coordinate Descent Method for Large-scale Linear SVM</i>. * Proceedings of the 25th international conference on Machine learning - ICML * ’08 (pp. 408–415). New York, New York, USA: ACM Press. * doi:10.1145/1390156.1390208 * </li> * <li> * Ho, C.-H., &amp; Lin, C.-J. (2012). <i>Large-scale Linear Support Vector * Regression</i>. Journal of Machine Learning Research, 13, 3323–3348. * Retrieved from <a href="http://ntu.csie.org/~cjlin/papers/linear-svr.pdf"> * here</a> * </ul> * @author Edward Raff * @see DCD */ public class DCDs implements BinaryScoreClassifier, Regressor, Parameterized, SingleWeightVectorModel, WarmClassifier, WarmRegressor { private static final long serialVersionUID = -1686294187234524696L; private int maxIterations; private double tolerance; private Vec[] vecs; private double[] alpha; private double[] y; private double bias; private Vec w; private double C; private boolean useL1; private double eps = 0.001; private boolean useBias = true; /** * Creates a new DCDL2 SVM object */ public DCDs() { this(10000, false); } /** * Creates a new DCD SVM object * @param maxIterations the maximum number of training iterations * @param useL1 whether or not to use L1 or L2 form */ public DCDs(int maxIterations, boolean useL1) { this(maxIterations, 1e-3, 1, useL1); } /** * Creates a new DCD SVM object * @param maxIterations the maximum number of training iterations * @param tolerance the tolerance value for early stopping * @param C the misclassification penalty * @param useL1 whether or not to use L1 or L2 form */ public DCDs(int maxIterations, double tolerance, double C, boolean useL1) { setMaxIterations(maxIterations); setTolerance(tolerance); setC(C); setUseL1(useL1); } /** * Sets the penalty parameter for misclassifications. The recommended value * is 1, and values larger than 4 are not normally needed according to the * original paper. 
* * @param C the penalty parameter in (0, Inf) */ public void setC(double C) { if(Double.isNaN(C) || Double.isInfinite(C) || C <= 0) throw new ArithmeticException("Penalty parameter must be a positive value, not " + C); this.C = C; } /** * Returns the penalty parameter for misclassifications. * @return the penalty parameter for misclassifications. */ public double getC() { return C; } /** * Sets the {@code eps} used in the epsilon insensitive loss function used * when performing regression. Errors in the output that less than * {@code eps} during training are treated as correct. * <br> * This parameter has no impact on classification problems. * * @param eps the non-negative value to use as the error tolerance in regression */ public void setEps(double eps) { if(Double.isNaN(eps) || eps < 0 || Double.isInfinite(eps)) throw new IllegalArgumentException("eps must be non-negative, not "+eps); this.eps = eps; } /** * Returns the epsilon insensitivity parameter used in regression problems. * @return the epsilon insensitivity parameter used in regression problems. */ public double getEps() { return eps; } /** * Sets the tolerance for the stopping condition when training, a small value near * zero allows training to stop early when little to no additional convergence * is possible. * * @param tolerance the tolerance value to use to stop early */ public void setTolerance(double tolerance) { this.tolerance = tolerance; } /** * Returns the tolerance value used to terminate early * @return the tolerance value used to terminate early */ public double getTolerance() { return tolerance; } /** * Determines whether or not to use the L<sup>1</sup> or L<sup>2</sup> SVM * @param useL1 <tt>true</tt> to use the L<sup>1</sup> form, <tt>false</tt> to use the L<sup>2</sup> form. */ public void setUseL1(boolean useL1) { this.useL1 = useL1; } /** * Returns <tt>true</tt> if the L<sup>1</sup> form is in use * @return <tt>true</tt> if the L<sup>1</sup> form is in use */ public boolean isUseL1() { return useL1; } /** * Sets the maximum number of iterations allowed through the whole training * set. * @param maxIterations the maximum number of training epochs */ public void setMaxIterations(int maxIterations) { if(maxIterations <= 0) throw new IllegalArgumentException("Number of iterations must be positive, not " + maxIterations); this.maxIterations = maxIterations; } /** * Returns the maximum number of allowed training epochs * @return the maximum number of allowed training epochs */ public int getMaxIterations() { return maxIterations; } /** * Sets whether or not an implicit bias term should be added to the inputs. * @param useBias {@code true} to add an implicit bias term to inputs, * {@code false} to use the input data as provided. */ public void setUseBias(boolean useBias) { this.useBias = useBias; } /** * Returns {@code true} if an implicit bias term is in use, or {@code false} * if not. * @return {@code true} if an implicit bias term is in use, or {@code false} * if not. 
*/ public boolean isUseBias() { return useBias; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return bias; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public CategoricalResults classify(DataPoint data) { if(w == null) throw new UntrainedModelException("The model has not been trained"); CategoricalResults cr = new CategoricalResults(2); if(getScore(data) < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { return w.dot(dp.getNumericalValues())+bias; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { train(dataSet, (Classifier)null); } @Override public void train(ClassificationDataSet dataSet, Classifier warmSolution, boolean parallel) { train(dataSet, warmSolution); } @Override public void train(ClassificationDataSet dataSet, Classifier warmSolution) { if(dataSet.getClassSize() != 2) throw new FailedToFitException("SVM only supports binary classificaiton problems"); vecs = new Vec[dataSet.size()]; alpha = new double[vecs.length]; y = new double[vecs.length]; bias = 0; final double[] Qhs = new double[vecs.length];//Q hats final double[] U = new double[vecs.length], D = new double[vecs.length]; for(int i = 0; i < dataSet.size(); i++) { final DataPoint dp = dataSet.getDataPoint(i); vecs[i] = dp.getNumericalValues(); y[i] = dataSet.getDataPointCategory(i)*2-1; U[i] = getU(dataSet.getWeight(i)); D[i] = getD(dataSet.getWeight(i)); Qhs[i] = vecs[i].dot(vecs[i])+D[i]; if(useBias)//+1 for implicit bias term Qhs[i]++; } w = new DenseVector(vecs[0].length()); List<Integer> A = new IntList(vecs.length); ListUtils.addRange(A, 0, vecs.length, 1); if(warmSolution != null) { //TODO the below code works OK for warm starting classification problems, but we also need code that works well for warm starting the regression problems to meet the API contract. Having more difficulty with that one. // if (warmSolution instanceof SimpleWeightVectorModel) // { // SimpleWeightVectorModel swvm = (SimpleWeightVectorModel) warmSolution; // if (swvm.numWeightsVecs() != 1) // throw new FailedToFitException("Can not warm start from given solution, it has more than 1 weight vector"); // // Vec w_warm = swvm.getRawWeight(0); // double b_warm = useBias ? 
swvm.getBias(0) : 0; // //we can't just copy the values in b/c we need the solution to always be a linear combination of the training data // //we we use it to guess at alpha values // Iterator<Integer> iter = A.iterator(); // while (iter.hasNext()) // { // int i = iter.next(); // double error = max(1 - y[i] * (vecs[i].dot(w_warm) + b_warm), 0); // if (!useL1) // error *= error; // error = min(C*error, U[i]) * y[i]; // alpha[i] = abs(error); // if(error != 0) // { // w.mutableAdd(error, vecs[i]); // bias += error; // } // } // } if(warmSolution instanceof DCDs) { DCDs other = (DCDs) warmSolution; if (this.alpha != null && other.alpha.length != this.alpha.length) throw new FailedToFitException("Warm solution could not have been trained on the same data set"); double C_mul = this.C/other.C; other.w.copyTo(this.w); this.w.mutableMultiply(C); this.bias = other.bias*C_mul; System.arraycopy(other.alpha, 0, this.alpha, 0, this.alpha.length); for(int i = 0; i < this.alpha.length; i++) this.alpha[i] *= C_mul; } else throw new FailedToFitException("Warm solution can not be used for warm start"); } double M = Double.NEGATIVE_INFINITY; double m = Double.POSITIVE_INFINITY; boolean noShrinking = false; /* * From profling Shufling & RNG generation takes a suprising amount of * time on some data sets, so use one of our fast ones */ Random rand = RandomUtil.getRandom(); for(int t = 0; t < maxIterations; t++ ) { Collections.shuffle(A, rand); M = Double.NEGATIVE_INFINITY; m = Double.POSITIVE_INFINITY; Iterator<Integer> iter = A.iterator(); while(iter.hasNext())//2. { int i = iter.next(); //a final double G = y[i]*(w.dot(vecs[i])+bias)-1+D[i]*alpha[i];//bias will be zero if usebias is off //b double PG = 0; if(alpha[i] == 0) { if(G > M && !noShrinking) iter.remove(); if(G < 0) PG = G; } else if(alpha[i] == U[i]) { if(G < m && !noShrinking) iter.remove(); if(G > 0) PG = G; } else PG = G; //c M = Math.max(M, PG); m = Math.min(m, PG); //d if(PG != 0) { double alphaOld = alpha[i]; alpha[i] = Math.min(Math.max(alpha[i]-G/Qhs[i], 0), U[i]); double scale = (alpha[i]-alphaOld)*y[i]; w.mutableAdd(scale, vecs[i]); if(useBias) bias += scale; } } if(M - m < tolerance)//3. 
{ //a if(A.size() == alpha.length) break;//We have converged else //repeat without shrinking { A.clear(); ListUtils.addRange(A, 0, vecs.length, 1); noShrinking = true; } } else if(M <= 0 || m >= 0)//technically less agressive then the original paper noShrinking = true; else noShrinking = false; } //dual problem variables are no longer needed vecs = null; y = null; //don't delete alpha incase we want to warm start from it } @Override public boolean supportsWeightedData() { return true; } @Override public boolean warmFromSameDataOnly() { return true; } @Override public DCDs clone() { DCDs clone = new DCDs(maxIterations, tolerance, C, useL1); clone.bias = this.bias; clone.useBias = this.useBias; if(this.w != null) clone.w = this.w.clone(); if(this.alpha != null) clone.alpha = Arrays.copyOf(this.alpha, this.alpha.length); return clone; } @Override public double regress(DataPoint data) { return w.dot(data.getNumericalValues())+bias; } @Override public void train(RegressionDataSet dataSet, Regressor warmSolution, boolean parallel) { train(dataSet, warmSolution); } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(RegressionDataSet dataSet) { train(dataSet, (Regressor) null); } @Override public void train(RegressionDataSet dataSet, Regressor warmSolution) { vecs = new Vec[dataSet.size()]; /** * Makes the Beta vector in the Algo 4 description */ alpha = new double[vecs.length]; y = new double[vecs.length]; bias = 0; final double[] Qhs = new double[vecs.length];//Q hats final double[] U = new double[vecs.length], lambda = new double[vecs.length]; double v_0 = 0; for(int i = 0; i < dataSet.size(); i++) { final DataPoint dp = dataSet.getDataPoint(i); vecs[i] = dp.getNumericalValues(); y[i] = dataSet.getTargetValue(i); U[i] = getU(dataSet.getWeight(i)); lambda[i] = getD(dataSet.getWeight(i)); Qhs[i] = vecs[i].dot(vecs[i])+lambda[i]; if (useBias) Qhs[i] += 1.0; v_0 += Math.abs(eq24(0, -y[i]-eps, -y[i]+eps, U[i])); } w = new DenseVector(vecs[0].length()); IntList activeSet = new IntList(2*vecs.length); ListUtils.addRange(activeSet, 0, vecs.length, 1); if(warmSolution != null) { if(warmSolution instanceof DCDs) { DCDs other = (DCDs) warmSolution; if (this.alpha != null && other.alpha.length != this.alpha.length) throw new FailedToFitException("Warm solution could not have been trained on the same data set"); double C_mul = this.C/other.C; other.w.copyTo(this.w); this.w.mutableMultiply(C); this.bias = other.bias*C_mul; System.arraycopy(other.alpha, 0, this.alpha, 0, this.alpha.length); for(int i = 0; i < this.alpha.length; i++) this.alpha[i] *= C_mul; } else throw new FailedToFitException("Warm solution can not be used for warm start"); } /* * From profling Shufling & RNG generation takes a suprising amount of * time on some data sets, so use one of our fast ones */ Random rand = RandomUtil.getRandom(); double M = Double.POSITIVE_INFINITY; for(int iteration = 0; iteration < maxIterations; iteration++) { double maxVk = Double.NEGATIVE_INFINITY; double vKSum = 0; //6.1 Randomly permute T Collections.shuffle(activeSet, rand); //6.2 For i in T Iterator<Integer> iter = activeSet.iterator(); while(iter.hasNext()) { final int i = iter.next(); final double y_i = y[i]; final Vec x_i = vecs[i]; final double wDotX = w.dot(x_i)+bias; final double g = -y_i + wDotX + lambda[i] * alpha[i]; final double gP = g + eps; final double gN = g - eps; final double v_i = eq24(alpha[i], gN, gP, U[i]); maxVk = Math.max(maxVk, v_i); vKSum += Math.abs(v_i); 
//6.2.3 shrinking work //eq (26) beta_i = 0 and g'n(βi) < −M < 0 <M < g'p(βi) boolean shrink = false; if(alpha[i] == 0 && gN < -M && -M < 0 && M < gP) shrink = true; if( (alpha[i] == U[i] && gP < -M) || (alpha[i] == -U[i] && gN > M)) shrink = true; if(shrink) iter.remove(); //eq (22) final double Q_ii = Qhs[i]; final double d; if (gP < Q_ii * alpha[i]) d = -gP / Q_ii; else if (gN > Q_ii * alpha[i]) d = -gN / Q_ii; else d = -alpha[i]; if (Math.abs(d) < 1e-14) continue; //s = max(−U, min(U,beta_i +d)) eq (21) final double s = Math.max(-U[i], Math.min(U[i], alpha[i]+d)); w.mutableAdd(s-alpha[i], x_i); if(useBias) bias += (s-alpha[i]); alpha[i] = s; } //convergence check if(vKSum/v_0 < tolerance)//converged { if(activeSet.size() == vecs.length)//we converged on all the data break; else//reset to do a pass through the whole data set { activeSet.clear(); ListUtils.addRange(activeSet, 0, vecs.length, 1); M = Double.POSITIVE_INFINITY; } } else { M = maxVk; } } y = null; vecs = null; } private double getU(double w) { if(useL1) return C*w; else return Double.POSITIVE_INFINITY; } private double getD(double w) { if(useL1) return 0; else return 1/(2*C*w); } /** * returns the result of evaluation equation 24 of an individual index * @param beta_i the weight coefficent value * @param gN the g'<sub>n</sub>(beta_i) value * @param gP the g'<sub>p</sub>(beta_i) value * @param U the upper bound value obtained from {@link #getU(double) } * @return the result of equation 24 */ protected static double eq24(final double beta_i, final double gN, final double gP, final double U) { //6.2.2 double vi = 0;//Used as "other" value if(beta_i == 0)//if beta_i = 0 ... { //if beta_i = 0 and g'n(beta_i) >= 0 if(gN >= 0) vi = gN; else if(gP <= 0) //if beta_i = 0 and g'p(beta_i) <= 0 vi = -gP; } else//beta_i is non zero { //Two cases //if beta_i in (−U, 0), or //beta_i = −U and g'n(beta_i) <= 0 //then v_i = |g'n| //if beta_i in (0,U), or //beta_i = U and g'p(βi) >= 0 //then v_i = |g'p| if(beta_i < 0)//first set of cases { if(beta_i > -U || (beta_i == -U && gN <= 0)) vi = Math.abs(gN); } else//second case { if(beta_i < U || (beta_i == U && gP >= 0)) vi = Math.abs(gP); } } return vi; } /** * Guess the distribution to use for the regularization term * {@link #setC(double) C} in a SVM. * * @param d the data set to get the guess for * @return the guess for the C parameter in the SVM */ public static Distribution guessC(DataSet d) { return PlattSMO.guessC(d); } }
23,439
31.153635
230
java
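A sketch of the warm-start path described in the DCDs javadoc above: a model trained with a small C seeds a second model with a larger C on the same data set. The data set is assumed to already exist, and the C values are hypothetical.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.svm.DCDs;

public class DCDsWarmStartExample
{
    public static DCDs trainWithWarmStart(ClassificationDataSet dataSet)
    {
        DCDs cheap = new DCDs(10000, 1e-3, 0.1, true);
        cheap.train(dataSet);                 // first solve with a small C

        DCDs expensive = new DCDs(10000, 1e-3, 1.0, true);
        expensive.train(dataSet, cheap);      // warm start from the small-C solution
        return expensive;
    }
}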
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/DCSVM.java
/* * Copyright (C) 2016 Edward Raff * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package jsat.classifiers.svm; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.logging.Level; import java.util.logging.Logger; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.Classifier; import jsat.classifiers.DataPoint; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.classifiers.neuralnetwork.RBFNet; import jsat.clustering.kmeans.ElkanKernelKMeans; import jsat.clustering.kmeans.KernelKMeans; import jsat.clustering.kmeans.LloydKernelKMeans; import jsat.distributions.kernels.KernelTrick; import jsat.distributions.kernels.RBFKernel; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.utils.DoubleList; import jsat.utils.FakeExecutor; import jsat.utils.IntList; import jsat.utils.IntSet; import jsat.utils.ListUtils; import jsat.utils.SystemInfo; import jsat.utils.concurrent.ParallelUtils; /** * This is an implementation of the Divide-and-Conquer Support Vector Machine * (DC-SVM). It uses a a combination of clustering and warm-starting to train * faster, as well as an early stopping strategy to provide a fast approximate * SVM solution. The final accuracy should often be at or near that of a normal * SVM, while being faster to train. <br> * <br> * The current implementation is based on {@link SVMnoBias}, meaning this code * does not have a bias term and it only works with normalized kernels. Any * non-normalized kernel will be normalized automatically. This is not a problem * for the common RBF kernel.<br> * <br> * * See: * <ul> * <li>Hsieh, C.-J., Si, S., & Dhillon, I. S. (2014). <i>A Divide-and-Conquer * Solver for Kernel Support Vector Machines</i>. In Proceedings of the 31st * International Conference on Machine Learning. 
Beijing, China.</li> * </ul> * * @author Edward Raff */ public class DCSVM extends SupportVectorLearner implements Classifier, Parameterized, BinaryScoreClassifier { private double C = 1; private double tolerance = 1e-3; private KernelKMeans clusters; private int m = 2000; private int l_max = 4; private int l_early = 3; private int k = 4; private Map<Integer, SVMnoBias> early_models; private long cache_size = 0; /** * Creates a new DC-SVM for the given kernel * @param k the kernel to use */ public DCSVM(KernelTrick k) { super(k, CacheMode.ROWS); this.cache_size = Runtime.getRuntime().freeMemory()/2; } /** * Creates a new DC-SVM for the RBF kernel */ public DCSVM() { this(new RBFKernel()); } /** * Copy Constructor * @param toCopy object to copy */ public DCSVM(DCSVM toCopy) { super(toCopy); this.C = toCopy.C; this.tolerance = toCopy.tolerance; if(toCopy.clusters != null) this.clusters = toCopy.clusters.clone(); this.cache_size = toCopy.cache_size; this.m = toCopy.m; this.l_early = toCopy.l_early; this.l_max = toCopy.l_max; this.k = toCopy.k; if(toCopy.early_models != null) { this.early_models = new ConcurrentHashMap<Integer, SVMnoBias>(); for(Map.Entry<Integer, SVMnoBias> x : toCopy.early_models.entrySet()) this.early_models.put(x.getKey(), x.getValue().clone()); } } /** * The DC-SVM algorithm works by creating a hierarchy of levels, and * iteratively refining the solution from one level to the next. Level 0 * corresponds to the exact SVM solution, and higher levels are courser * approximations. This method controls which level the training starts at. * * @param l_max which level to start the training at. */ public void setStartLevel(int l_max) { if(l_max < 0) throw new IllegalArgumentException("l_max must be a non-negative integer, not " + l_max); this.l_max = l_max; } /** * * @return which level to start the training at. */ public int getStartLevel() { return l_max; } /** * The DC-SVM algorithm works by creating a hierarchy of levels, and * iteratively refining the solution from one level to the next. Level 0 * corresponds to the exact SVM solution, and higher levels are courser * approximations. This method controls which level the training stops at, * with 0 being the latest it can stop. The default stopping level is 3. * * @param l_early which level to stop the training at, and use for * classification. */ public void setEndLevel(int l_early) { if(l_early < 0) throw new IllegalArgumentException("l_early must be a non-negative integer, not " + l_early); this.l_early = l_early; } /** * * @return which level to stop the training at, and use for * classification. */ public int getEndLevel() { return l_early; } /** * At each level of the DC-SVM training, a clustering algorithm is used to * divide the dataset into sub-groups for independent training. Increasing * the number of points used for clustering improves model accuracy, but * also increases training time. The default value is 2000. This value may * need to be increased if using a higher staring level. 
* * @param m the number of data points to sample for each cluster size */ public void setClusterSampleSize(int m) { if(m <= 0) throw new IllegalArgumentException("Cluster Sample Size must be a positive integer, not " + m); this.m = m; } public int getClusterSampleSize() { return m; } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(2); double sum = getScore(data); if (sum > 0) cr.setProb(1, 1); else cr.setProb(0, 1); return cr; } @Override public double getScore(DataPoint dp) { if (vecs == null) throw new UntrainedModelException("Classifier has yet to be trained"); Vec x = dp.getNumericalValues(); int c; if(early_models.size() > 1) c = clusters.findClosestCluster(x, getKernel().getQueryInfo(x)); else c = 0; return early_models.get(c).getScore(dp); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { final int N = dataSet.size(); vecs = dataSet.getDataVectors(); early_models = new ConcurrentHashMap<>(); // weights = dataSet.getDataWeights(); // label = new short[N]; // for(int i = 0; i < N; i++) // label[i] = (short) (dataSet.getDataPointCategory(i)*2-1); setCacheMode(CacheMode.NONE);//Initiates the accel cache //initialize alphas array to all zero alphas = new double[N];//zero is default value /** * Used to keep track of which sub cluster each training datapoint belongs to */ final int[] group = new int[N]; /** * Used to select subsamples of data points for clustering, and to map them back to their original indicies */ IntList indicies = new IntList(); //for l = lmax, . . . , 1 do for(int l = l_max; l >= l_early; l--) { // System.out.println("Level " + l); early_models.clear(); //sub-sampled dataset to use for clustering ClassificationDataSet toCluster = new ClassificationDataSet(dataSet.getNumNumericalVars(), dataSet.getCategories(), dataSet.getPredicting()); //Set number of clusters in the current level k_l = k^l int k_l = (int) Math.pow(k, l); //number of datapoints to use in clustering //increase M = m by default. Increase to M=7 m if less than 7 points per cluster int M; if( N/k_l < 7 ) M = k_l*7; else M = m; if(l == l_max) { ListUtils.addRange(indicies, 0, N, 1); Collections.shuffle(indicies); for(int i = 0; i < Math.min(M, N); i++) toCluster.addDataPoint(dataSet.getDataPoint(i), dataSet.getDataPointCategory(i)); } else { indicies.clear(); for(int i = 0; i < N; i++) if(alphas[i] != 0) indicies.add(i); Collections.shuffle(indicies); for(int i = 0; i < Math.min(M, indicies.size()); i++) toCluster.addDataPoint(dataSet.getDataPoint(i), dataSet.getDataPointCategory(i)); } //Run kernel kmeans on {xi1, . . . ,xim} to get cluster centers c1, . . . , ckl ; clusters = new ElkanKernelKMeans(getKernel()); clusters.setMaximumIterations(100); // System.out.println("Finding " + k_l + " clusters"); k_l = Math.min(k_l, toCluster.size()/2);//Few support vectors? Make clustering smaller then int[] sub_results; if(k_l <= 1)//dont run cluster, we are doing final refinement step! 
{ sub_results = new int[N];//will be all 0, for 1 'cluster' indicies.clear(); ListUtils.addRange(indicies, 0, N, 1); } else sub_results = clusters.cluster(toCluster, k_l, parallel, (int[])null); //create partitioning //First, don't bother with distance computations for people we just clustered Arrays.fill(group, -1); Set<Integer> found_clusters = new IntSet(k_l); for(int i = 0; i < sub_results.length; i++) { group[indicies.get(i)] = sub_results[i]; found_clusters.add(sub_results[i]); } //find who everyone else belongs to ParallelUtils.run(parallel, N, (i)-> { if (group[i] >= 0) return;//you already got assigned above List<Double> qi = null; if (accelCache != null) { int multiplier = accelCache.size() / N; qi = accelCache.subList(i * multiplier, i * multiplier + multiplier); } group[i] = clusters.findClosestCluster(vecs.get(i), qi); }); //everyone has now been assigned to their closest cluster //build SVM model for each cluster for(int c : found_clusters) { // System.out.println("\tBuilding model for " + c); ClassificationDataSet V_c = new ClassificationDataSet(dataSet.getNumNumericalVars(), dataSet.getCategories(), dataSet.getPredicting()); DoubleList V_alphas = new DoubleList(); IntList orig_index = new IntList(); for (int i = 0; i < N; i++) { if (group[i] != c) continue;//well get to you later //else, create dataset V_c.addDataPoint(dataSet.getDataPoint(i), dataSet.getDataPointCategory(i)); V_alphas.add(Math.abs(alphas[i])); orig_index.add(i); } SVMnoBias svm = new SVMnoBias(getKernel()); if(cache_size > 0) svm.setCacheSize(V_alphas.size(), cache_size); else svm.setCacheMode(CacheMode.NONE); //Train model if(l == l_max)//first round, no warm start svm.train(V_c, parallel); else//warm start { svm.train(V_c, V_alphas.getBackingArray(), parallel); } early_models.put(c, svm); //Update larger set of alphas for(int i = 0; i < orig_index.size(); i++) this.alphas[orig_index.get(i)] = svm.alphas[i]; } } if(l_early == 0)//fully solve the problem! Refinement step was done implicitly in above loop { SVMnoBias svm = new SVMnoBias(getKernel()); if (cache_size > 0) svm.setCacheSize(dataSet.size(), cache_size ); else svm.setCacheMode(CacheMode.NONE); svm.train(dataSet, Arrays.copyOf(this.alphas, this.alphas.length), parallel); early_models.clear(); early_models.put(0, svm); //Update all alphas for (int i = 0; i < N; i++) this.alphas[i] = svm.alphas[i]; } } @Override public boolean supportsWeightedData() { return true; } @Override public DCSVM clone() { return new DCSVM(this); } /** * Sets the complexity parameter of SVM. The larger the C value the harder * the margin SVM will attempt to find. Lower values of C allow for more * misclassification errors. * @param C the soft margin parameter */ @Parameter.WarmParameter(prefLowToHigh = true) public void setC(double C) { if(C <= 0) throw new ArithmeticException("C must be a positive constant"); this.C = C; } /** * Returns the soft margin complexity parameter of the SVM * @return the complexity parameter of the SVM */ public double getC() { return C; } }
14,978
34.161972
153
java
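A configuration sketch for the DC-SVM class above. Parameter values are hypothetical; the RBF kernel is the class default and satisfies the normalized-kernel requirement noted in its javadoc.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.svm.DCSVM;
import jsat.distributions.kernels.RBFKernel;

public class DCSVMExample
{
    public static DCSVM trainApproximate(ClassificationDataSet dataSet)
    {
        DCSVM svm = new DCSVM(new RBFKernel());
        svm.setC(1.0);
        svm.setStartLevel(4);           // start from the coarsest partitioning
        svm.setEndLevel(1);             // stop early for a faster, approximate model
        svm.setClusterSampleSize(2000); // points sampled for kernel k-means at each level
        svm.train(dataSet, true);       // parallel training
        return svm;
    }
}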
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/LSSVM.java
package jsat.classifiers.svm; import java.util.concurrent.ExecutorService; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.DataPoint; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.kernels.KernelTrick; import jsat.regression.RegressionDataSet; import jsat.regression.Regressor; import static java.lang.Math.*; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.*; import java.util.stream.IntStream; import jsat.DataSet; import jsat.classifiers.*; import jsat.distributions.Distribution; import jsat.distributions.kernels.LinearKernel; import jsat.exceptions.FailedToFitException; import jsat.parameters.Parameterized; import jsat.parameters.Parameter.WarmParameter; import jsat.regression.*; import jsat.utils.FakeExecutor; import jsat.utils.PairedReturn; import jsat.utils.SystemInfo; import jsat.utils.concurrent.ParallelUtils; import static jsat.utils.concurrent.ParallelUtils.*; /** * The Least Squares Support Vector Machine (LS-SVM) is an alternative to the * standard SVM classifier for regression and binary classification problems. It * can be faster to train, but is usually significantly slower to perform * predictions with. This is because the LS-SVM solution is dense, so all * training points become support vectors. <br> * <br> * The LS-SVM algorithm may be warm started only from another LS-SVM object * trained on the same data set. <br> * <br> * NOTE: A SMO implementation similar to {@link PlattSMO} is used. This is done * because is can easily operate without explicitly forming the whole kernel * matrix. However it is recommended to use the LS-SVM when the problem size is * small enough such that {@link SupportVectorLearner.CacheMode#FULL} can be * used. <br> * <br> * If <i>N</i> is the number of data points:<br> * <ul> * <li>Training complexity is roughly O(n^3), but can be lower for small C</li> * <li>Prediction complexity is O(n)</li> * <li>This implementation is multi-threaded, but scales best when there are * several thousand data points per core. For smaller problems, especially when * full cache mode can be used, there may be negative speedups when using the * parallel training methods</li> * </ul> * <br> * See: <br> * <ul> * <li>Suykens, J.,&amp;Vandewalle, J. (1999). <i>Least Squares Support Vector * Machine Classifiers</i>. Neural processing letters, 9(3), 293–298. * doi:10.1023/A:1018628609742</li> * <li>Keerthi, S. S.,&amp;Shevade, S. K. (2003). <i>SMO algorithm for Least * Squares SVM</i>. In Proceedings of the International Joint Conference on * Neural Networks (Vol. 3, pp. 2088–2093). IEEE. 
doi:10.1109/IJCNN.2003.1223730 * </li> * </ul> * * * @author Edward Raff */ public class LSSVM extends SupportVectorLearner implements BinaryScoreClassifier, Regressor, Parameterized, WarmRegressor, WarmClassifier { private static final long serialVersionUID = -7569924400631719451L; protected double b = 0, b_low, b_up; private double C = 1; private int i_up, i_low; private double[] fcache; private double dualObjective; private static double epsilon = 1e-12; private static double tol = 1e-3; /** * Creates a new LS-SVM learner that uses a linear model and does not use a * cache */ public LSSVM() { this(new LinearKernel()); } /** * Creates a new LS-SVM learner that does not use a cache * @param kernel the kernel method to use */ public LSSVM(KernelTrick kernel) { this(kernel, CacheMode.NONE); } /** * Creates a new LS-SVM learner * @param kernel the kernel method to use * @param cacheMode the caching scheme to use for kernel evaluations */ public LSSVM(KernelTrick kernel, CacheMode cacheMode) { super(kernel, cacheMode); } /** * Creates a deep copy of another LS-SVM * @param toCopy the object to copy */ public LSSVM(LSSVM toCopy) { super(toCopy.getKernel().clone(), toCopy.getCacheMode()); this.b_low = toCopy.b_low; this.b_up = toCopy.b_up; this.i_up = toCopy.i_up; this.i_low = toCopy.i_low; this.C = toCopy.C; if(toCopy.alphas != null) this.alphas = Arrays.copyOf(toCopy.alphas, toCopy.alphas.length); if(toCopy.fcache != null) this.fcache = Arrays.copyOf(toCopy.fcache, toCopy.fcache.length); } /** * Sets the regularization constant when training. Lower values correspond * to higher amounts of regularization. * @param C the positive regularization parameter */ @WarmParameter(prefLowToHigh = true) public void setC(double C) { if(C <= 0 || Double.isNaN(C) || Double.isInfinite(C)) throw new IllegalArgumentException("C must be in (0, Infty), not " + C); this.C = C; } /** * Returns the regularization parameter value used * @return the regularization parameter value */ public double getC() { return C; } private boolean takeStep(int i1, int i2, ExecutorService ex, boolean parallel) throws InterruptedException, ExecutionException { //these 2 will hold the old values final double alph1 = alphas[i1]; final double alph2 = alphas[i2]; double F1 = fcache[i1]; double F2 = fcache[i2]; double gamma = alph1+alph2; final double k11 = kEval(i1, i1); final double k12 = kEval(i2, i1); final double k22 = kEval(i2, i2); final double eta = 2*k12-k11-k22; final double a2 = alph2-(F1-F2)/eta; if(abs(a2-alph2) < epsilon*(a2+alph2+epsilon)) return false; final double a1 = gamma-a2; alphas[i1] = a1; alphas[i2] = a2; //Update the DualObjectiveFunction using (4.11) double t = (F1-F2)/eta; dualObjective -= eta/2*t*t; //2 steps done in the same loop b_up = Double.NEGATIVE_INFINITY; b_low = Double.POSITIVE_INFINITY; //Update Fcache[i] for all i in I using (4.10) //Compute (i_low, b_low) and (i_up, b_up) using (3.4) ParallelUtils.run(parallel, fcache.length, (from, to) -> { int i_low_cand = from; int i_up_cand = from; double b_up_p = Double.NEGATIVE_INFINITY, b_low_p = Double.POSITIVE_INFINITY; for (int i = from; i < to; i++) { final double k_i1 = kEval(i1, i); final double k_i2 = kEval(i2, i); final double Fi = (fcache[i] += (a1 - alph1) * k_i1 + (a2 - alph2) * k_i2); if(Fi > b_up_p) { b_up_p = Fi; i_up_cand = i; } if(Fi < b_low_p) { b_low_p = Fi; i_low_cand = i; } } synchronized (fcache) { if (fcache[i_up_cand] > b_up) { b_up = fcache[i_up_cand]; i_up = i_up_cand; } if (fcache[i_low_cand] < b_low) { b_low = fcache[i_low_cand]; 
i_low = i_low_cand; } } }, ex); return true; } @Override public boolean warmFromSameDataOnly() { return true; } private double computeDualityGap(boolean fast, boolean parallel) throws InterruptedException, ExecutionException { //Below we use the IntStream rather than parallelUtil's range b/c the sequence should be long enough to actually get parallelism, and it will use less memory double gap = 0; //set b using (3.16) or (3.17) if(fast) b = (b_up+b_low)/2; else { b = ParallelUtils.streamP(IntStream.range(0, alphas.length), parallel).mapToDouble( i -> { return fcache[i]-alphas[i]/C; }).sum(); b /= alphas.length; } gap = ParallelUtils.streamP(IntStream.range(0, alphas.length), parallel).mapToDouble( i -> { final double x_i = b + alphas[i]/C - fcache[i]; return alphas[i]*(fcache[i]-(0.5*alphas[i]/C)) + C*x_i*x_i/2; }).sum(); return gap; } private void initializeVariables(double[] targets, LSSVM warmSolution, DataSet data) { alphas = new double[targets.length]; fcache = new double[targets.length]; dualObjective = 0; if(warmSolution != null) { if(warmSolution.alphas.length != this.alphas.length) throw new FailedToFitException("Warm LS-SVM solution could not have been trained on the sama data, different number of alpha values present"); double C_ratio = this.C/warmSolution.C; for(int i = 0; i < targets.length; i++) { alphas[i] = warmSolution.alphas[i]; fcache[i] = warmSolution.fcache[i]-(C_ratio-1)*warmSolution.alphas[i]/(this.C); dualObjective += alphas[i]*(targets[i]-fcache[i]); } dualObjective /= 2; } else { for(int i = 0; i < targets.length; i++) fcache[i] = -targets[i]; } //Compute (i_low, b_low) and (i_up, b_up) using (3.4) b_up = Double.NEGATIVE_INFINITY; b_low = Double.POSITIVE_INFINITY; //Update Fcache[i] for all i in I using (4.10) //Compute (i_low, b_low) and (i_up, b_up) using (3.4) for (int i = 0; i < fcache.length; i++) { final double Fi = fcache[i]; if(Fi > b_up) { b_up = Fi; i_up = i; } if(Fi < b_low) { b_low = Fi; i_low = i; } } setCacheMode(getCacheMode());//Initializes the cahce } @Override public double getScore(DataPoint dp) { return regress(dp); } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(2); if(regress(data) > 0) cr.setProb(1, 1.0); else cr.setProb(0, 1.0); return cr; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet, null, parallel); } @Override public void train(RegressionDataSet dataSet, Regressor warmSolution, boolean parallel) { if(warmSolution != null && !(warmSolution instanceof LSSVM)) throw new FailedToFitException("Warm solution must be an implementation of LS-SVM, not " + warmSolution.getClass()); double[] targets = dataSet.getTargetValues().arrayCopy(); mainLoop(dataSet, (LSSVM)warmSolution, targets, parallel); } @Override public void train(RegressionDataSet dataSet, Regressor warmSolution) { train(dataSet, warmSolution, false); } @Override public void train(ClassificationDataSet dataSet, Classifier warmSolution, boolean parallel) { if(dataSet.getClassSize() != 2) throw new FailedToFitException("LS-SVM only supports binary classification problems"); if(warmSolution != null && !(warmSolution instanceof LSSVM)) throw new FailedToFitException("Warm solution must be an implementation of LS-SVM, not " + warmSolution.getClass()); double[] targets = new double[dataSet.size()]; for(int i = 0; i < dataSet.size(); i++) targets[i] = dataSet.getDataPointCategory(i)*2-1; mainLoop(dataSet, (LSSVM) warmSolution , targets, parallel); } @Override public void 
train(ClassificationDataSet dataSet, Classifier warmSolution) { train(dataSet, warmSolution, false); } @Override public boolean supportsWeightedData() { return false; } @Override public double regress(DataPoint data) { return kEvalSum(data.getNumericalValues())-b; } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet, null, parallel); } @Override public LSSVM clone() { return new LSSVM(this); } private void mainLoop(DataSet dataSet, LSSVM warmSolution, double[] targets, boolean parallel) { try { ExecutorService ex = ParallelUtils.getNewExecutor(parallel); vecs = dataSet.getDataVectors(); initializeVariables(targets, warmSolution, dataSet); boolean change = true; double dualityGap = computeDualityGap(true, parallel); int iter = 0; while (dualityGap > tol * dualObjective && change) { change = takeStep(i_up, i_low, ex, parallel); dualityGap = computeDualityGap(true, parallel); iter++; } setCacheMode(null); setAlphas(alphas); } catch (InterruptedException interruptedException) { throw new FailedToFitException(interruptedException); } catch (ExecutionException executionException) { throw new FailedToFitException(executionException); } } /** * Guess the distribution to use for the regularization term * {@link #setC(double) C} in a LS-SVM. * * @param d the data set to get the guess for * @return the guess for the C parameter in the LS-SVM */ public static Distribution guessC(DataSet d) { return PlattSMO.guessC(d);//LS-SVM isn't technically the same algo, but still a good search } }
14,172
31.656682
165
java
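The LSSVM class above exposes its solver through the standard JSAT train/classify interface. The following is a minimal usage sketch, not part of the original source: the class name LSSVMExample, the dataset variable train, and the query point are hypothetical, and the C and kernel-width values are illustrative rather than tuned.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.svm.LSSVM;
import jsat.classifiers.svm.SupportVectorLearner.CacheMode;
import jsat.distributions.kernels.RBFKernel;

public class LSSVMExample
{
    //Assumes 'train' is a pre-built binary ClassificationDataSet and 'query' a point to score
    public static int predict(ClassificationDataSet train, DataPoint query)
    {
        //RBF kernel with full kernel-matrix caching; FULL trades memory for fewer kernel evaluations
        LSSVM lssvm = new LSSVM(new RBFKernel(0.5), CacheMode.FULL);
        lssvm.setC(10);           //weaker regularization than the default C = 1
        lssvm.train(train, true); //'true' requests parallel training
        return lssvm.classify(query).mostLikely();
    }
}

Because LSSVM also implements Regressor and WarmRegressor, the same object can be fit with the RegressionDataSet train overloads, and it can be warm-started from a previous LSSVM fit on the same data when sweeping C.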
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/Pegasos.java
package jsat.classifiers.svm; import java.util.*; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.SingleWeightVectorModel; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.Distribution; import jsat.distributions.Gamma; import jsat.distributions.LogUniform; import jsat.exceptions.FailedToFitException; import jsat.linear.*; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.utils.IntList; import jsat.utils.ListUtils; /** * Implements the linear kernel mini-batch version of the Pegasos SVM * classifier. It performs updates stochastically and is very fast. <br> * Because Pegasos updates the primal directly, there are no support vectors * saved from the training set. * <br><br> * See: Shalev-Shwartz, S., Singer, Y.,&amp;Srebro, N. (2007). <i>Pegasos : Primal * Estimated sub-GrAdient SOlver for SVM</i>. 24th international conference on * Machine learning (pp. 807–814). New York, NY: ACM. * doi:10.1145/1273496.1273598 * * @author Edward Raff */ public class Pegasos implements BinaryScoreClassifier, Parameterized, SingleWeightVectorModel { private static final long serialVersionUID = -2145631476467081171L; private int epochs; private double reg; private int batchSize; private boolean projectionStep = false; private Vec w; private double bias; /** * The default number of epochs is {@value #DEFAULT_EPOCHS} */ public static final int DEFAULT_EPOCHS = 5; /** * The default regularization value is {@value #DEFAULT_REG} */ public static final double DEFAULT_REG = 1e-4; /** * The default batch size is {@value #DEFAULT_BATCH_SIZE} */ public static final int DEFAULT_BATCH_SIZE = 1; /** * Creates a new Pegasos SVM classifier using default values. */ public Pegasos() { this(DEFAULT_EPOCHS, DEFAULT_REG, DEFAULT_BATCH_SIZE); } /** * Creates a new Pegasos SVM classifier * @param epochs the number of training iterations * @param reg the regularization term * @param batchSize the batch size */ public Pegasos(int epochs, double reg, int batchSize) { setEpochs(epochs); setRegularization(reg); setBatchSize(batchSize); } /** * Copy constructor * @param toCopy the object to copy */ public Pegasos(Pegasos toCopy) { this.epochs = toCopy.epochs; this.reg = toCopy.reg; this.batchSize = toCopy.batchSize; if(toCopy.w != null) this.w = toCopy.w.clone(); this.bias = toCopy.bias; this.projectionStep = toCopy.projectionStep; } /** * Sets the batch size used during training. At each epoch, a batch of * randomly selected data points will be used to update. * * @param batchSize the number of data points to use when updating */ public void setBatchSize(int batchSize) { if(batchSize < 1) throw new ArithmeticException("At least one sample must be take at each iteration"); this.batchSize = batchSize; } /** * Returns the number of points used in each iteration * @return the number of points used in each iteration */ public int getBatchSize() { return batchSize; } /** * Sets the number of iterations through the training set that will be * performed. 
* @param epochs the number of iterations */ public void setEpochs(int epochs) { if(epochs < 1) throw new ArithmeticException("Must perform a positive number of epochs"); this.epochs = epochs; } /** * Returns the number of iterations of updating that will be done * @return the number of iterations */ public double getEpochs() { return epochs; } /** * Sets whether or not to use the projection step after each update per * iteration * * @param projectionStep whether or not to use the projection step */ public void setProjectionStep(boolean projectionStep) { this.projectionStep = projectionStep; } /** * Returns whether or not the projection step is in use after each iteration * @return <tt>true</tt> if the projection step will be performed */ public boolean isProjectionStep() { return projectionStep; } /** * Sets the regularization constant used for learning. The regularization * must be positive, and the learning rate is proportional to the * regularization value. This means regularizations very near zero will * take a long time to converge. * * @param reg the regularization to apply */ public void setRegularization(double reg) { if(Double.isInfinite(reg) || Double.isNaN(reg) || reg <= 0.0) throw new ArithmeticException("Pegasos requires a positive regularization cosntant"); this.reg = reg; } /** * Returns the amount of regularization to used in training * @return the regularization parameter. */ public double getRegularization() { return reg; } @Override public Vec getRawWeight() { return w; } @Override public double getBias() { return bias; } @Override public Vec getRawWeight(int index) { if(index < 1) return getRawWeight(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public double getBias(int index) { if (index < 1) return getBias(); else throw new IndexOutOfBoundsException("Model has only 1 weight vector"); } @Override public int numWeightsVecs() { return 1; } @Override public Pegasos clone() { return new Pegasos(this); } @Override public CategoricalResults classify(DataPoint data) { CategoricalResults cr = new CategoricalResults(2); if(getScore(data) < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { return w.dot(dp.getNumericalValues())+bias; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { if(dataSet.getClassSize() != 2) throw new FailedToFitException("SVM only supports binary classificaiton problems"); final int m = dataSet.size(); w = new DenseVector(dataSet.getNumNumericalVars()); if(projectionStep) w = new VecWithNorm(w, 0.0); w = new ScaledVector(w); bias = 0; IntList miniBatch = new IntList(batchSize); IntList randOrder = new IntList(m); ListUtils.addRange(randOrder, 0, m, 1); int t = 0; for (int epoch = 0; epoch < epochs; epoch++)//start at 1 for convinence { Collections.shuffle(randOrder); for (int indx = 0; indx < m; indx += batchSize) { t++; miniBatch.clear(); miniBatch.addAll(randOrder.subList(indx, Math.min(indx+batchSize, m))); //Filter to only the points that have the correct label Iterator<Integer> iter = miniBatch.iterator(); while (iter.hasNext()) { int i = iter.next(); if (getSign(dataSet, i) * (w.dot(getX(dataSet, i)) + bias) >= 1) iter.remove(); } final double nt = 1.0 / (reg * t); w.mutableMultiply(1.0 - nt * reg); for (int i : miniBatch) { double sign = getSign(dataSet, i); Vec x = getX(dataSet, i); final double s = sign * nt /batchSize; w.mutableAdd(s, 
x); bias += s; } if (projectionStep) { double norm = w.pNorm(2); double mult = Math.min(1, 1.0 / (Math.sqrt(reg) * norm)); w.mutableMultiply(mult); bias *= mult; } } } } @Override public boolean supportsWeightedData() { return false; } private Vec getX(ClassificationDataSet dataSet, int i) { return dataSet.getDataPoint(i).getNumericalValues(); } private double getSign(ClassificationDataSet dataSet, int i) { return dataSet.getDataPointCategory(i) == 1 ? 1.0 : -1.0; } /** * Guess the distribution to use for the regularization term * {@link #setRegularization(double) } in Pegasos. * * @param d the data set to get the guess for * @return the guess for the &lambda; parameter */ public static Distribution guessRegularization(DataSet d) { return new LogUniform(1e-7, 1e-2); } }
9,370
27.056886
97
java
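The Pegasos class above performs the mini-batch primal sub-gradient update described in its javadoc. A minimal, hypothetical usage sketch follows; the class name PegasosExample and the variable train are assumptions, and the epoch count, regularization, and batch size are illustrative values, not recommendations.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.svm.Pegasos;

public class PegasosExample
{
    public static Pegasos fit(ClassificationDataSet train)
    {
        //10 passes over the data, lambda = 1e-4, mini-batches of 32 points
        Pegasos svm = new Pegasos(10, 1e-4, 32);
        svm.setProjectionStep(true); //optionally project w back toward the 1/sqrt(lambda) ball after each update
        svm.train(train);            //binary problems only; otherwise a FailedToFitException is thrown
        return svm;
    }
}

Since the model is a single primal weight vector, the fitted object answers getScore and classify without storing any support vectors, and guessRegularization(dataSet) can seed a hyperparameter search over lambda.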
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/PegasosK.java
package jsat.classifiers.svm; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.DataPoint; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.kernels.KernelTrick; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.utils.FakeExecutor; import jsat.utils.ListUtils; import jsat.utils.SystemInfo; import jsat.utils.concurrent.AtomicDouble; import jsat.utils.concurrent.ParallelUtils; import jsat.utils.random.RandomUtil; /** * Implements the kernelized version of the {@link Pegasos} algorithm for SVMs. * Unlike standard SVM algorithms, this one relies on randomness and has no * guarantee to reach the optimal solution, however it is very fast to train. * Each iteration of the algorithm randomly selects one datapoint to potentially * update the coefficient of. <br> * The resulting set of support vectors may be more or less sparse than a normal * SVM implementation. <br> * Because the Pegasos algorithm is stochastic and the kernelized updates on * errors given regularization, the kernelized version may have more difficulty * with noisy or overlapping class distributions. * <br><br> * See: Shalev-Shwartz, S., Singer, Y.,&amp;Srebro, N. (2007). <i>Pegasos : Primal * Estimated sub-GrAdient SOlver for SVM</i>. 24th international conference on * Machine learning (pp. 807–814). New York, NY: ACM. * doi:10.1145/1273496.1273598 * * @author Edward Raff */ public class PegasosK extends SupportVectorLearner implements BinaryScoreClassifier, Parameterized { private static final long serialVersionUID = 5405460830472328107L; private double regularization; private int iterations; /** * Creates a new kernelized Pegasos SVM solver * * @param regularization the amount of regularization to apply, normally a very small positive value * @param iterations the number of update iterations to perform * @param kernel the kernel to use */ public PegasosK(double regularization, int iterations, KernelTrick kernel) { this(regularization, iterations, kernel, CacheMode.NONE); } /** * Creates a new kernelized Pegasos SVM solver * * @param regularization the amount of regularization to apply, normally a very small positive value * @param iterations the number of update iterations to perform * @param kernel the kernel to use * @param cacheMode what type of kernel caching to use */ public PegasosK(double regularization, int iterations, KernelTrick kernel, CacheMode cacheMode) { super(kernel, cacheMode); setRegularization(regularization); setIterations(iterations); } /** * Sets the number of iterations of the algorithm to perform. Each iteration * may or may not update a single coefficient for a specific data point. * * @param iterations the number of learning iterations to perform */ public void setIterations(int iterations) { this.iterations = iterations; } /** * Returns the number of iterations used during training * @return the number of iterations used in training */ public int getIterations() { return iterations; } /** * Sets the amount of regularization to apply. 
The regularization must be a * positive value * @param regularization the amount of regularization to apply */ public void setRegularization(double regularization) { if(Double.isNaN(regularization) || Double.isInfinite(regularization) || regularization <= 0) throw new ArithmeticException("Regularization must be a positive constant, not " + regularization); this.regularization = regularization; } /** * Returns the amount of regularization used * @return the amount of regularization used */ public double getRegularization() { return regularization; } @Override public PegasosK clone() { PegasosK clone = new PegasosK(regularization, iterations, getKernel().clone(), getCacheMode()); if(this.vecs != null) { clone.vecs = new ArrayList<Vec>(vecs); clone.alphas = new double[this.alphas.length]; for(int i = 0; i < this.vecs.size(); i++) { clone.vecs.set(i, this.vecs.get(i).clone()); clone.alphas[i] = this.alphas[i]; } } return clone; } @Override public CategoricalResults classify(DataPoint data) { if(alphas == null) throw new UntrainedModelException("Model has not been trained"); CategoricalResults cr = new CategoricalResults(2); double sum = getScore(data); //SVM only says yess / no, can not give a percentage if(sum > 0) cr.setProb(1, 1); else cr.setProb(0, 1); return cr; } @Override public double getScore(DataPoint dp) { return kEvalSum(dp.getNumericalValues()); } /** * Does part of the run through the data to compute the predictoin */ private class PredictPart implements Callable<Double> { int i; int start; int end; int[] sign; public PredictPart(int i, int start, int end, int[] sign) { this.i = i; this.start = start; this.end = end; this.sign = sign; } @Override public Double call() throws Exception { final double sign_i = sign[i]; double val = 0; for(int j = start; j < end; j++) { if(j == i || alphas[j] == 0) continue; val += alphas[j]*sign_i* kEval(i, j); } return val; } } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { if (dataSet.getClassSize() != 2) throw new FailedToFitException("Pegasos only supports binary classification problems"); Random rand = RandomUtil.getRandom(); final int m = dataSet.size(); alphas = new double[m]; int[] sign = new int[m]; vecs = new ArrayList<>(m); for (int i = 0; i < dataSet.size(); i++) { vecs.add(dataSet.getDataPoint(i).getNumericalValues()); sign[i] = dataSet.getDataPointCategory(i) == 1 ? 1 : -1; } setCacheMode(getCacheMode());//Initiates the cahce for (int t = 1; t <= iterations; t++) { final int i = rand.nextInt(m); final double sign_i = sign[i]; final AtomicDouble val = new AtomicDouble(0.0); ParallelUtils.run(true, m, (start, end) -> { double val_local = 0; for(int j = start; j < end; j++) { if(j == i || alphas[j] == 0) continue; val_local += alphas[j]*sign_i* kEval(i, j); } val.addAndGet(val_local); }); val.set(val.get() * sign_i / (regularization * t)); if(val.get() < 1) alphas[i]++; } //Collect the non zero alphas int pos = 0; for (int i = 0; i < alphas.length; i++) if (alphas[i] != 0) { alphas[pos] = alphas[i] * sign[i]; ListUtils.swap(vecs, pos, i); pos++; } alphas = Arrays.copyOf(alphas, pos); vecs = new ArrayList<>(vecs.subList(0, pos)); setCacheMode(null); setAlphas(alphas); } @Override public boolean supportsWeightedData() { return false; } }
8,383
30.283582
111
java
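PegasosK above runs the kernelized variant: each iteration samples one point, evaluates the current decision value against it, and increments that point's coefficient if the scaled margin is below 1. A brief, hypothetical usage sketch follows; PegasosKExample, train, and the chosen iteration count and kernel width are assumptions for illustration.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.svm.PegasosK;
import jsat.classifiers.svm.SupportVectorLearner.CacheMode;
import jsat.distributions.kernels.RBFKernel;

public class PegasosKExample
{
    public static PegasosK fit(ClassificationDataSet train)
    {
        //one illustrative choice: several passes' worth of random single-point updates
        int iters = train.size() * 10;
        PegasosK svm = new PegasosK(1e-5, iters, new RBFKernel(0.5), CacheMode.FULL);
        svm.train(train, true); //the per-iteration kernel sum is evaluated in parallel
        return svm;
    }
}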
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/PlattSMO.java
package jsat.classifiers.svm; import static java.lang.Math.*; import java.util.*; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.Distribution; import jsat.distributions.Exponential; import jsat.distributions.LogUniform; import jsat.distributions.kernels.KernelTrick; import jsat.distributions.kernels.LinearKernel; import jsat.distributions.kernels.RBFKernel; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.ConstantVector; import jsat.linear.DenseVector; import jsat.linear.Vec; import jsat.parameters.*; import jsat.parameters.Parameter.WarmParameter; import jsat.regression.*; import jsat.utils.ListUtils; /** * An implementation of SVMs using Platt's Sequential Minimum Optimization (SMO) * for both Classification and Regression problems. <br> * <br> * This algorithm can be warm started for classification problems by any * algorithm implementing the {@link BinaryScoreClassifier} interface. For regression any algorithm can be used as a warms start. For best results, warm starts should be from algorithms that will have a similar solution to PlattSMO. <br><br> * See:<br> * <ul> * <li>Platt, J. C. (1998). <i>Sequential Minimal Optimization: A Fast Algorithm * for Training Support Vector Machines</i>. Advances in kernel methods * (pp. 185 – 208). Retrieved from <a href="http://www.bradblock.com/Sequential_Minimal_Optimization_A_Fast_Algorithm_for_Training_Support_Vector_Machine.pdf">here</a></li> * <li>Keerthi, S. S., Shevade, S. K., Bhattacharyya, C.,&amp;Murthy, K. R. K. * (2001). <i>Improvements to Platt’s SMO Algorithm for SVM Classifier Design * </i>. Neural Computation, 13(3), 637–649. doi:10.1162/089976601300014493</li> * <li>Smola, A. J.,&amp;Schölkopf, B. (2004). <i>A tutorial on support vector * regression</i>. Statistics and Computing, 14(3), 199–222. * doi:10.1023/B:STCO.0000035301.49549.88</li> * <li>Shevade, S. K., Keerthi, S. S., Bhattacharyya, C.,&amp;Murthy, K. K. (1999) * . <i>Improvements to the SMO algorithm for SVM regression</i>. Control D * ivision, Dept. of Mechanical Engineering CD-99–16. Control Division, Dept. of * Mechanical Engineering. doi:10.1109/72.870050</li> * <li>Shevade, S. K., Keerthi, S. S., Bhattacharyya, C.,&amp;Murthy, K. K. (2000) * . <i>Improvements to the SMO algorithm for SVM regression</i>. IEEE * transactions on neural networks / a publication of the IEEE Neural Networks * Council, 11(5), 1188–93. doi:10.1109/72.870050</li> * </ul> * * @author Edward Raff */ public class PlattSMO extends SupportVectorLearner implements BinaryScoreClassifier, WarmRegressor, Parameterized, WarmClassifier { private static final long serialVersionUID = 1533410993462673127L; /** * Bias */ protected double b = 0, b_low, b_up; private double C = 1; private double tolerance = 1e-3; private double eps = 1e-7; private double epsilon = 1e-2; private int maxIterations = 10000; private boolean modificationOne = true; protected double[] fcache; private int i_up, i_low; /* NOTE: Only I_0 needs to be iterated over, so make it a set to iterate * quickly. All others only need set/check, so just use a boolean array. * This saves memory. 
(bools default false, so they start out all 'empty') */ /* * used only in regression */ private double[] alpha_s; /** * i : 0 &lt; a_i &lt; C * <br> * For regression this contains both of I0_a and I0_b */ private boolean[] I0; /** * Indicates if the value that is currently in I0 is also in I0_a. If its * not in I0, the value in this array is false */ private boolean[] I0_a; /** * Indicates if the value that is currently in I0 is also in I0_b. If its * not in I0, the value in this array is false */ private boolean[] I0_b; /** * i: y_i = 1 AND a_i = 0 */ private boolean[] I1; /** * i: y_i = -1 AND a_i = C */ private boolean[] I2; /** * i: y_i = 1 AND a_i = C */ private boolean[] I3; /** * i: y_i = -1 AND a_i = 0 */ private boolean[] I4; /** * Stores the true value of the data point */ protected double[] label; /** * Weight values to apply to each data point */ protected Vec weights; /** * Creates a new SVM object with a {@link LinearKernel} that uses no cache * mode. * */ public PlattSMO() { this(new LinearKernel()); } /** * Creates a new SVM object that uses no cache mode. * * @param kf the kernel trick to use */ public PlattSMO(KernelTrick kf) { super(kf, SupportVectorLearner.CacheMode.NONE); } @Override public CategoricalResults classify(DataPoint data) { if(vecs == null) throw new UntrainedModelException("Classifier has yet to be trained"); CategoricalResults cr = new CategoricalResults(2); double sum = getScore(data); if(sum > 0) cr.setProb(1, 1); else cr.setProb(0, 1); return cr; } @Override public double getScore(DataPoint dp) { return kEvalSum(dp.getNumericalValues())-b; } @Override public void train(ClassificationDataSet dataSet, Classifier warmSolution, boolean parallel) { train(dataSet, warmSolution); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet, Classifier warmSolution) { trainC_warm_and_normal(dataSet, warmSolution); } @Override public void train(ClassificationDataSet dataSet) { trainC_warm_and_normal(dataSet, null); } private void trainC_warm_and_normal(ClassificationDataSet dataSet, Classifier warmSolution) { if(dataSet.getClassSize() != 2) throw new FailedToFitException("SVM does not support non binary decisions"); //First we need to set up the vectors array final int N = dataSet.size(); vecs = new ArrayList<Vec>(N); label = new double[N]; weights = new DenseVector(N); b = 0; i_up = i_low = -1;//giberish for init I0 = new boolean[N]; I1 = new boolean[N]; I2 = new boolean[N]; I3 = new boolean[N]; I4 = new boolean[N]; boolean allWeightsAreOne = true; for(int i = 0; i < N; i++) { DataPoint dataPoint = dataSet.getDataPoint(i); vecs.add(dataPoint.getNumericalValues()); weights.set(i, dataSet.getWeight(i)); if( dataSet.getWeight(i) != 1) allWeightsAreOne = false; if(dataSet.getDataPointCategory(i) == 0) { label[i] = -1; i_low = i; I4[i] = true; } else { label[i] = 1; i_up = i; I1[i] = true; } } if(allWeightsAreOne)//if everything == 1, don't waste the memory storying it weights = new ConstantVector(1.0, N); setCacheMode(getCacheMode());//Initiates the cahce //initialize alphas array to all zero alphas = new double[N];//zero is default value fcache = new double[N]; b_up = -1; fcache[i_up] = -1; b_low = 1; fcache[i_low] = 1; boolean examinAll = true; //Now lets try and do some warm starting if applicable if(warmSolution instanceof PlattSMO || warmSolution instanceof BinaryScoreClassifier) { examinAll = false; WarmScope://We need to use one of the methods to detemrine if the 
model is a fit { if (warmSolution instanceof PlattSMO) { //if this SMO object was learend on the same data, we can get a very good guess on C PlattSMO warmSMO = (PlattSMO) warmSolution; //first, we need to make sure we were actually trained on the same data //TODO find a better way to ensure this is true, it is POSSIBLE that we could have same labels and different data if(warmSMO.alphas == null)//whats going on? just break out break WarmScope; boolean sameData = alphas.length == warmSMO.alphas.length; if(sameData) for(int i = 0; i < this.label.length && sameData; i++) //copy sign used so that -0.0 gets picked up as -1.0 and 0.0 as 1.0 if(this.label[i] != Math.copySign(1.0, warmSMO.alphas[i])) sameData = false; if(sameData) { double C_prev = warmSMO.C; double multiplier = this.C / C_prev; for (int i = 0; i < vecs.size(); i++) this.alphas[i] = fuzzyClamp(multiplier * Math.abs(warmSMO.alphas[i]), this.C); break WarmScope;//init sucessful } //else, fall through and let 2nd case cick in } //last case, should be true if (warmSolution instanceof BinaryScoreClassifier) { //In this case we can take a decent guess at the values of alpha BinaryScoreClassifier warmSC = (BinaryScoreClassifier) warmSolution; for (int i = 0; i < vecs.size(); i++) { //get the loss, normaly wrapped by max(x, 0), but that will be handled by the clamp double guess = 1 - label[i] * warmSC.getScore(dataSet.getDataPoint(i)); this.alphas[i] = fuzzyClamp(C * guess, C); } } else//how did this happen? { throw new FailedToFitException("BUG: Should not have been able to reach"); } } fcache[i_up] = 0; fcache[i_low] = 0; for (int i = 0; i < vecs.size(); i++) { //we can't skip a_i == 0 b/c we still need to make the contribution to w_dot_x final double a_i = this.alphas[i]; for (int j = i; j < vecs.size(); j++) { final double a_j = this.alphas[j]; if (a_j == 0 && a_i == 0) continue; double dot = kEval(i, j); if (i != j)//avoid double counting fcache[j] += a_i * label[i] * dot; fcache[i] += a_j * label[j] * dot; } } //determine i_up and i_low based on equations (11a) and (11b) in Keerthi et al for (int i = 0; i < vecs.size(); i++) { fcache[i] -= label[i]; updateSet(i, alphas[i], C*weights.get(i)); updateSetsLabeled(i, alphas[i], C*weights.get(i)); if(label[i] == -1) if(I0[i] && (i_low == -1 || fcache[i] > fcache[i_low]) ) { i_low = i; b_low = fcache[i]; } else if(I0[i] && (i_low == -1 || fcache[i] > fcache[i_up]) ) { i_up = i; b_up = fcache[i]; } } } int numChanged = 0; int examinAllCount = 0; int iter = 0; while( (examinAll || numChanged > 0) && iter < maxIterations ) { iter++; numChanged = 0; if (examinAll) { //loop I over all training examples for (int i = 0; i < N; i++) numChanged += examineExample(i); examinAllCount++; } else { if (modificationOne) { for(int i = 0; i < I0.length; i++) { if(!I0[i]) continue; numChanged += examineExample(i); if (b_up > b_low - 2 * tolerance) { numChanged = 0;//causes examinAll to become true break; } } } else { boolean inner_loop_success = true; while (b_up < b_low - 2 * tolerance && inner_loop_success) if (inner_loop_success = takeStep(i_up, i_low)) numChanged++; numChanged = 0; } } if(examinAll) examinAll = false; else if(numChanged == 0) examinAll = true; } if (iter >= maxIterations) {//1 extra pass to get a better guess on bUp & bLow since we quit early for (int i = 0; i < N; i++) numChanged += examineExample(i); } b = (b_up+b_low)/2; //collapse label into signed alphas for(int i = 0; i < label.length; i++) alphas[i] *= label[i]; // sparsify(); label = null; fcache = null; I0 = I1 = I2 = I3 = I4 = 
null; weights = null; setCacheMode(null); setAlphas(alphas); } /** * Updates the index set I0 * @param i1 the value of i1 * @param a1 the value of a1 * @param C the regularization value to use for this datum */ private void updateSet(int i1, double a1, double C ) { I0[i1] = a1 > 0 && a1 < C; } private double fuzzyClamp(double val, double max) { return fuzzyClamp(val, max, max*1e-7); } private double fuzzyClamp(double val, double max, double e) { if(val > max-e) return max; if(val < e) return 0; return val; } private void updateSetR(int i, double C) { /** * See page 5 of he pseudo code paper version of "Improvements to the SMO algorithm for SVM regression." */ //TODO, this can be done with less work.. but I messed that up double a_i = alphas[i]; double as_i = alpha_s[i]; I0_a[i] = 0 < a_i && a_i < C; I0_b[i] = 0 < as_i && as_i < C; I0[i] = I0_a[i] || I0_b[i]; I1[i] = a_i == 0 && as_i == 0; I2[i] = a_i == 0 && as_i == C; I3[i] = a_i == C && as_i == 0; } /** * Updates the index sets * @param i1 the index to update for * @param a1 the alphas value for the index * @param C the regularization value to use for this datum */ private void updateSetsLabeled(int i1, final double a1, final double C) { final double y_i = label[i1]; I1[i1] = a1 == 0 && y_i == 1; I2[i1] = a1 == C && y_i == -1; I3[i1] = a1 == C && y_i == 1; I4[i1] = a1 == 0 && y_i == -1; } protected boolean takeStep(int i1, int i2) { if(i1 == i2) return false; //alph1 = Lagrange multiplier for i double alpha1 = alphas[i1], alpha2 = alphas[i2]; //y1 = target[i1] double y1 = label[i1], y2 = label[i2]; double F1 = fcache[i1]; double F2 = fcache[i2]; final double C1 = C*weights.get(i1); final double C2 = C*weights.get(i2); //s = y1*y2 double s = y1*y2; //Compute L, H : see smo-book, page 46 //also "A tutorial on support vector regression" page 30 double L, H; if(y1 != y2) { L = max(0, alpha2-alpha1); H = min(C2, C1+alpha2-alpha1); } else { L = max(0, alpha1+alpha2-C1); H = min(C2, alpha1+alpha2); } if (L >= H)//>= instead of == incase of numerical issues return false; double a1;//new alpha1 double a2;//new alpha2 /* * k11 = kernel(point[i1],point[i1]) * k12 = kernel(point[i1],point[i2]) * k22 = kernel(point[i2],point[i2] */ double k11 = kEval(i1, i1); double k12 = kEval(i1, i2); double k22 = kEval(i2, i2); //eta = 2*k12-k11-k22 double eta = 2*k12 - k11 - k22; if (eta < 0) { a2 = alpha2 - y2 * (F1 - F2) / eta; if (a2 < L) a2 = L; else if (a2 > H) a2 = H; } else { /* * Lobj = objective function at a2=L * Hobj = objective function at a2=H */ double L1 = alpha1 + s * (alpha2 - L); double H1 = alpha1 + s * (alpha2 - H); double f1 = y1 * F1 - alpha1 * k11 - s * alpha2 * k12; double f2 = y2 * F2 - alpha2 * k22 - s * alpha1 * k12; double Lobj = -0.5 * L1 * L1 * k11 - 0.5 * L * L * k22 - s * L * L1 * k12 - L1 * f1 - L * f2; double Hobj = -0.5 * H1 * H1 * k11 - 0.5 * H * H * k22 - s * H * H1 * k12 - H1 * f1 - H * f2; if(Lobj > Hobj + eps) a2 = L; else if(Lobj < Hobj - eps) a2 = H; else a2 = alpha2; } a2 = fuzzyClamp(a2, C2); if(abs(a2 - alpha2) < eps*(a2+alpha2+eps)) return false; a1 = alpha1 + s *(alpha2-a2); a1 = fuzzyClamp(a1, C1); double newF1C = F1 + y1*(a1-alpha1)*k11 + y2*(a2-alpha2)*k12; double newF2C = F2 + y1*(a1-alpha1)*k12 + y2*(a2-alpha2)*k22; updateSet(i1, a1, C1); updateSet(i2, a2, C2); updateSetsLabeled(i1, a1, C1); updateSetsLabeled(i2, a2, C2); fcache[i1] = newF1C; fcache[i2] = newF2C; b_low = Double.NEGATIVE_INFINITY; b_up = Double.POSITIVE_INFINITY; i_low = -1; i_up = -1; //"Update fcache[i] for i in I_0 using new Lagrange 
multipliers", done inside loop check for new bounds for(int i = 0; i < I0.length; i++) { if(!I0[i]) continue; if (i != i1 && i != i2) fcache[i] += y1 * (a1 - alpha1) * kEval(i1, i) + y2 * (a2 - alpha2) * kEval(i2, i); double bCand = fcache[i]; if (bCand > b_low) { i_low = i; b_low = bCand; } if (bCand < b_up) { i_up = i; b_up = bCand; } } //case where i1 & i2 are not in I0 for(int i : new int[]{i1, i2}) { if(I3[i] || I4[i]) { double bCand = fcache[i]; if (bCand > b_low ) { i_low = i; b_low = bCand; } } if(I1[i] || I2[i]) { double bCand = fcache[i]; if (bCand < b_up ) { i_up = i; b_up = bCand; } } } //Store a1 in the alphas array alphas[i1] = a1; //Store a2 in the alphas arra alphas[i2] = a2; return true; } protected boolean takeStepR(int i1, int i2) { if(i1 == i2) return false; //alph1 = Lagrange multiplier for i double alpha1 = alphas[i1], alpha2 = alphas[i2]; double alpha1_S = alpha_s[i1], alpha2_S = alpha_s[i2]; double F1 = fcache[i1];//phi1 in paper double F2 = fcache[i2]; final double C1 = C*weights.get(i1); final double C2 = C*weights.get(i2); /* * k11 = kernel(point[i1],point[i1]) * k12 = kernel(point[i1],point[i2]) * k22 = kernel(point[i2],point[i2] */ double k11 = kEval(i1, i1); double k12 = kEval(i2, i1); double k22 = kEval(i2, i2); //eta = -2*k12+k11+k22 double eta = -2*k12 + k11 + k22; if(eta < 0) eta = 0;//lets just assume it was a numerical issue... (dirty NPSD kernels) //gamma = alpha1-alpha1*+alpha2-alpha2* double gamma = alpha1-alpha1_S+alpha2-alpha2_S; boolean case1, case2, case3, case4, finished; case1 = case2 = case3 = case4 = finished = false; double alpha1_old = alpha1, alpha1_oldS = alpha1_S; double alpha2_old = alpha2, alpha2_oldS = alpha2_S; double deltaPhi = F1-F2; double L, H; while(!finished)//occurs at most 3 times { if(!case1 && (alpha1 > 0 || (alpha1_S == 0 && deltaPhi > 0) ) && (alpha2 > 0 || (alpha2_S == 0 && deltaPhi < 0) ) ) { //compute L, H, (wrt. alpha1, alpha2) L = max(0, gamma-C1); H = min(C2, gamma); if(L < H) { double a2 = max(L, min(alpha2 - deltaPhi/eta, H)); a2 = fuzzyClamp(a2, C2); double a1 = alpha1 - (a2 - alpha2); a1 = fuzzyClamp(a1, C1); if(abs(alpha1-a1) > 1e-10 || abs(a2-alpha2) > 1e-10) { deltaPhi += (a2-alpha2)*eta; alpha1 = a1; alpha2 = a2; } } else finished = true; case1 = true; } else if(!case2 && (alpha1 > 0 || (alpha1_S == 0 && deltaPhi > 2*epsilon)) && (alpha2_S > 0 || (alpha2 == 0 && deltaPhi > 2*epsilon))) { //compute L, H, (wrt. alpha1, alpha2*) L = max(0, -gamma); H = min(C2, -gamma+C1); if(L < H) { double a2 = max(L, min(alpha2_S + (deltaPhi-2*epsilon)/eta, H)); a2 = fuzzyClamp(a2, C2); double a1 = alpha1 + (a2 - alpha2_S); a1 = fuzzyClamp(a1, C1); if(abs(alpha1-a1) > 1e-10 || abs(alpha2_S-a2) > 1e-10) { deltaPhi += (alpha2_S-a2)*eta; alpha1 = a1; alpha2_S = a2; } } else finished = true; case2 = true; } else if(!case3 && (alpha1_S > 0 || (alpha1 == 0 && deltaPhi < -2*epsilon)) && (alpha2 > 0 || (alpha2_S == 0 && deltaPhi < -2*epsilon))) { //compute L, H, (wrt. alpha1*, alpha2) L = max(0, gamma); H = min(C2, C1+gamma); if(L < H) { double a2 = max(L, min(alpha2 - (deltaPhi+2*epsilon)/eta, H)); a2 = fuzzyClamp(a2, C2); double a1 = alpha1_S + (a2 - alpha2); a1 = fuzzyClamp(a1, C1); if(abs(alpha1_S-a1) > 1e-10 || abs(alpha2-a2) > 1e-10) { deltaPhi += (a2-alpha2)*eta; alpha1_S = a1; alpha2 = a2; } } else finished = true; case3 = true; } else if(!case4 && (alpha1_S > 0 || (alpha1 == 0 && deltaPhi < 0)) && (alpha2_S > 0 || (alpha2 == 0 && deltaPhi > 0))) { //compute L, H, (wrt. 
alpha1*, alpha2*) L = max(0, -gamma-C1); H = min(C2, -gamma); if(L < H) { double a2 = max(L, min(alpha2_S + deltaPhi/eta, H)); a2 = fuzzyClamp(a2, C2); double a1 = alpha1_S - (a2 - alpha2_S); a1 = fuzzyClamp(a1, C1); if(abs(alpha1_S-a1) > 1e-10 || abs(alpha2_S-a2) > 1e-10) { deltaPhi += (alpha2_S-a2)*eta; alpha1_S = a1; alpha2_S = a2; } } else finished = true; case4 = true; } else { finished = true; } } //TODO do a check for numerical issues //end of the while loop, did we change anything? if(alpha1 == alpha1_old && alpha1_S == alpha1_oldS && alpha2 == alpha2_old && alpha2_S == alpha2_oldS) { return false; } alphas[i1] = alpha1; alphas[i2] = alpha2; alpha_s[i1] = alpha1_S; alpha_s[i2] = alpha2_S; //Update error cache using new Lagrange multipliers double ceof1 = alpha1 - alpha1_old - (alpha1_S - alpha1_oldS); double ceof2 = alpha2 - alpha2_old - (alpha2_S - alpha2_oldS); for(int i = 0; i < I0.length; i++) if(I0[i] && i != i1 && i != i2) fcache[i] -= ceof1 * kEval(i1, i) + ceof2 * kEval(i2, i); fcache[i1] -= ceof1 * k11 + ceof2 * k12; fcache[i2] -= ceof1 * k12 + ceof2 * k22; updateSetR(i1, C1); updateSetR(i2, C2); //Update threshold to reflect change in Lagrange multipliers Update b_low = Double.NEGATIVE_INFINITY; b_up = Double.POSITIVE_INFINITY; i_low = -1; i_up = -1; for(int i = 0; i < I0.length; i++) if(I0[i]) updateThreshold(i); //may duplicate work... who cares? its just 2 constant time checks updateThreshold(i1); updateThreshold(i2); //These SHOULD ALWAYS BE NON NEGATIVE, i1 and i2 should have a valid update if(i_low == -1 || i_up == -1) throw new FailedToFitException("BUG: Imposible code block reached. Please report"); return true; } /** * Updates the threshold for regression based off of * "using only i1, i2, and indices in I_0" * @param i the index to update from that MUST have a value in {@link #fcache} */ private void updateThreshold(int i) { double Fi = fcache[i]; double F_tilde_i = b_low; if (I0_b[i] || I2[i]) F_tilde_i = Fi + epsilon; else if (I0_a[i] || I1[i]) F_tilde_i = Fi - epsilon; double F_bar_i = b_up; if (I0_a[i] || I3[i]) F_bar_i = Fi - epsilon; else if (I0_b[i] || I1[i]) F_bar_i = Fi + epsilon; //update the bounds if (b_low < F_tilde_i) { b_low = F_tilde_i; i_low = i; } if (b_up > F_bar_i) { b_up = F_bar_i; i_up = i; } } private int examineExample(int i2) { //y2 = target[i2] double y2 = label[i2]; double F2; if(I0[i2]) F2 = fcache[i2]; else { fcache[i2] = F2 = decisionFunction(i2) - y2; //update (b_low, i_low) or (b_up, i_up) using (F2, i2) if( (I1[i2] || I2[i2] ) && (F2 < b_up) ) { b_up = F2; i_up = i2; } else if( (I3[i2] || I4[i2]) && (F2 > b_low) ) { b_low = F2; i_low = i2; } } //check optimality using current b_low and b_up and, if violated, find //an index i1 to do joint optimization ith i2 boolean optimal = true; int i1 = -1;//giberish init value will not get used, but makes compiler smile final boolean I0_contains_i2 = I0[i2]; if(I0_contains_i2 || I1[i2] || I2[i2]) { if(b_low - F2 > tolerance*2) { optimal = false; i1 = i_low; } } if(I0_contains_i2 || I3[i2] || I4[i2]) { if(F2-b_up > tolerance*2) { optimal = false; i1 = i_up; } } if(optimal)//no changes if optimal return 0; //for i2 in I0 choose the better i1 if(I0_contains_i2) { if(b_low-F2 > F2-b_up) i1 = i_low; else i1 = i_up; } if(takeStep(i1, i2)) return 1; else return 0; } private int examineExampleR(int i2) { //y2 = target[i2] double y2 = label[i2]; double F2; if(I0[i2]) F2 = fcache[i2]; else { fcache[i2] = F2 = y2-decisionFunctionR(i2); //update (b_low, i_low) or (b_up, i_up) using (F2, i2) if(I1[i2]) { 
if(F2+eps < b_up) { b_up = F2+epsilon; i_up = i2; } else if(F2-epsilon > b_low) { b_low = F2-epsilon; i_low = i2; } } else if( I2[i2] && (F2+epsilon > b_low) ) { b_low = F2+epsilon; i_low = i2; } else if( I3[i2] && (F2-epsilon < b_up) ) { b_up = F2-epsilon; i_up = i2; } } //check optimality using current b_low and b_up and, if violated, find //an index i1 to do joint optimization ith i2 boolean optimal = true; int i1 = -1;//giberish init value will not get used, but makes compiler smile //5 cases to check final double F2mEps = F2-epsilon; final double F2pEps = F2+epsilon; final double tol2 = 2*tolerance; if (I0_a[i2])//case 1 { if (b_low - F2mEps > tol2) { optimal = false; i1 = i_low; if (F2mEps - b_up > b_low - F2mEps) i1 = i_up; } else if (F2mEps - b_up > tol2) { optimal = false; i1 = i_up; if (b_low - F2mEps > F2mEps - b_up) i1 = i_low; } } else if (I0_b[i2])//case 2 { if (b_low - F2pEps > tol2) { optimal = false; i1 = i_low; if (F2pEps - b_up > b_low - F2pEps) i1 = i_up; } else if (F2pEps - b_up > tol2) { optimal = false; i1 = i_up; if (b_low - F2pEps > F2pEps - b_up) i1 = i_low; } } else if (I1[i2])//case 3 { if (b_low - F2pEps > tol2) { optimal = false; i1 = i_low; if (F2pEps - b_up > b_low - F2pEps) i1 = i_up; } else if (F2mEps - b_up > tol2) { optimal = false; i1 = i_up; if (b_low - F2mEps > F2mEps - b_up) i1 = i_low; } } else if (I2[i2])//case 4 { if (F2pEps - b_up > tol2) { optimal = false; i1 = i_up; } } else if (I3[i2])//case 5 { if (b_low - F2mEps > tol2) { optimal = false; i1 = i_low; } } if(optimal) return 0; if(takeStepR(i1, i2)) return 1; else return 0; } /** * Returns the local decision function for classification training purposes * without the bias term * @param v the index of the point to select * @return the decision function output sans bias */ protected double decisionFunction(int v) { double sum = 0; for(int i = 0; i < vecs.size(); i++) if(alphas[i] > 0) sum += alphas[i] * label[i] * kEval(v, i); return sum; } /** * Returns the local decision function for regression training purposes * without the bias term * @param v the index of the point to select * @return the decision function output sans bias */ protected double decisionFunctionR(int v) { double sum = 0; for (int i = 0; i < vecs.size(); i++) if (alphas[i] != alpha_s[i])//multipler would be zero sum += (alphas[i] - alpha_s[i]) * kEval(v, i); return sum; } @Override public PlattSMO clone() { PlattSMO copy = new PlattSMO(this.getKernel().clone()); copy.C = this.C; if(this.alphas != null) copy.alphas = Arrays.copyOf(this.alphas, this.alphas.length); if(this.alpha_s != null) copy.alpha_s = Arrays.copyOf(this.alpha_s, this.alpha_s.length); if(this.weights != null) copy.weights = this.weights.clone(); copy.b = this.b; copy.eps = this.eps; copy.epsilon = this.epsilon; copy.maxIterations = this.maxIterations; if(this.label != null) copy.label = Arrays.copyOf(this.label, this.label.length); copy.tolerance = this.tolerance; if(this.vecs != null) copy.vecs = new ArrayList<Vec>(this.vecs); copy.setCacheMode(this.getCacheMode()); copy.setCacheValue(this.getCacheValue()); return copy; } @Override public boolean supportsWeightedData() { return true; } /** * Sets the complexity parameter of SVM. The larger the C value the harder * the margin SVM will attempt to find. Lower values of C allow for more * misclassification errors. 
* @param C the soft margin parameter */ @WarmParameter(prefLowToHigh = true) public void setC(double C) { if(C <= 0) throw new ArithmeticException("C must be a positive constant"); this.C = C; } /** * Returns the soft margin complexity parameter of the SVM * @return the complexity parameter of the SVM */ public double getC() { return C; } /** * Sets the maximum number of iterations to perform of the training loop. * This is important for cases with a C value that is to large for a non * linear problem, which can result in SVM failing to converge. * @param maxIterations the maximum number of main iteration loops */ public void setMaxIterations(int maxIterations) { this.maxIterations = maxIterations; } /** * Returns the maximum number of iterations * @return the maximum number of iterations */ public int getMaxIterations() { return maxIterations; } /** * Sets where or not modification one or two should be used when training. * Modification two is more aggressive, but often results in less kernel * evaluations. * * @param modificationOne {@code true} to us modificaiotn one, {@code false} * to use modification two. */ public void setModificationOne(boolean modificationOne) { this.modificationOne = modificationOne; } /** * Returns true if modification one is in use * @return true if modification one is in use */ public boolean isModificationOne() { return modificationOne; } /** * Sets the tolerance for the solution. Higher values converge to worse * solutions, but do so faster * @param tolerance the tolerance for the solution */ public void setTolerance(double tolerance) { this.tolerance = tolerance; } /** * Returns the solution tolerance * @return the solution tolerance */ public double getTolerance() { return tolerance; } @Override public double regress(DataPoint data) { return kEvalSum(data.getNumericalValues())+b; } @Override public void train(RegressionDataSet dataSet, boolean parallel) { train(dataSet); } /** * Sets the epsilon for the epsilon insensitive loss when performing * regression. This variable has no impact during classification problems. * For regression problems, any predicated value that is within the epsilon * of the target will be treated as "correct". 
Increasing epsilon usually * decreases the number of support vectors, but may reduce the accuracy of * the model * * @param epsilon the positive value for the acceptable error when doing * regression */ public void setEpsilon(double epsilon) { if(Double.isNaN(epsilon) || Double.isInfinite(epsilon) || epsilon <= 0) throw new IllegalArgumentException("epsilon must be in (0, infty), not " + epsilon); this.epsilon = epsilon; } /** * Returns the epsilon insensitive loss value * @return the epsilon insensitive loss value */ public double getEpsilon() { return epsilon; } @Override public void train(RegressionDataSet dataSet, Regressor warmSolution, boolean parallel) { train(dataSet, warmSolution); } @Override public void train(RegressionDataSet dataSet) { train(dataSet, (Regressor)null); } @Override public void train(RegressionDataSet dataSet, Regressor warmSolution) { final int N = dataSet.size(); vecs = new ArrayList<Vec>(N); label = new double[N]; fcache = new double[N]; b = 0; weights = new DenseVector(N); boolean allWeightsAreOne = true; for(int i = 0; i < N; i++) { DataPoint dataPoint = dataSet.getDataPoint(i); vecs.add(dataPoint.getNumericalValues()); fcache[i] = label[i] = dataSet.getTargetValue(i); weights.set(i, dataSet.getWeight(i)); if(dataSet.getWeight(i) != 1) allWeightsAreOne = false; } if(allWeightsAreOne)//if everything == 1, don't waste the memory storying it weights = new ConstantVector(1.0, N); setCacheMode(getCacheMode());//Initiates the cahce I0 = new boolean[N]; I0_a = new boolean[N]; I0_b = new boolean[N]; I1 = new boolean[N]; I2 = new boolean[N]; I3 = new boolean[N]; //initialize alphas array to all zero alphas = new double[N];//zero is default value alpha_s = new double[N]; i_up = i_low = 0;//value chosen completly at random, I promise (any input will be fine) Arrays.fill(I1, true); b_up = b_low = dataSet.getTargetValue(i_up); b_up += eps; b_low -= eps; boolean examinAll = true; //no errors set, all zero so far.. if(warmSolution != null) { /* * warm for regression is kinda hard, so we use it to set the initial focus on a few data poitns. We set the alpha values to be non zero for the points that are errors and let everything else past the "margin" (ie: in the cone of the espilon) be zero. Then the first few passes of SMO will optimize this intial set, and then when examineAll becomes true larger corrections can be made. */ examinAll = false; b_low = -Double.MAX_VALUE; b_up = Double.MAX_VALUE; for (int i = 0; i < N; i++) { double err = label[i]-warmSolution.regress(dataSet.getDataPoint(i)); if(Math.abs(err) < epsilon) err = 0; else err -= Math.signum(err)*epsilon; // err = signum(err)*min(abs(err), 1); double C_i = C*weights.get(i); alphas[i] = fuzzyClamp(err, C_i, 1e-6); alpha_s[i] = fuzzyClamp(-err, C_i, 1e-6); } for (int i = 0; i < N; i++) { //fix F cache and set assignment final double C_i = C*weights.get(i); final double F_i = fcache[i] = label[i]-decisionFunctionR(i); updateSetR(i, C_i); //fix up the bounds on bias term, see eq (3) in "Improvements to the SMO algorithm for SVM regression." 
if(I0[i] || I1[i] || I3[i]) b_up = min(b_up, F_i); if(I0[i] || I1[i] || I2[i]) b_low = max(b_low, F_i); } b_up-=epsilon; b_low+=epsilon; } int numChanged = 0; int examinAllCount = 0; int iter = 0; while( (examinAll || numChanged > 0) && iter < maxIterations ) { iter++; numChanged = 0; if (examinAll) { //loop I over all training examples for (int i = 0; i < N; i++) numChanged += examineExampleR(i); examinAllCount++; } else { if(modificationOne) { for(int i = 0; i < I0.length; i++) { if(!I0[i]) continue; numChanged += examineExampleR(i); if (b_up > b_low - 2*tolerance) { numChanged = 0; break; } } } else//modification 2 { boolean inner_loop_success = true; do { if(inner_loop_success == takeStepR(i_up, i_low)) numChanged++; } while(inner_loop_success && b_up < b_low-2*tolerance); numChanged = 0; } } if (examinAll) examinAll = false; else if (numChanged == 0) examinAll = true; } b = (b_up+b_low)/2; //SVMs are usualy sparse, we dont need to keep all the original vectors! int supportVectorCount = 0; for(int i = 0; i < N; i++) if(alphas[i] != 0 || alpha_s[i] != 0)//Its a support vector { ListUtils.swap(vecs, supportVectorCount, i); alphas[supportVectorCount++] = alphas[i]-alpha_s[i]; } vecs = new ArrayList<Vec>(vecs.subList(0, supportVectorCount)); alphas = Arrays.copyOfRange(alphas, 0, supportVectorCount); label = null; fcache = null; I0 = I0_a = I0_b = I1 = I2 = I3 = I4 = null; setCacheMode(null); setAlphas(alphas); } @Override public boolean warmFromSameDataOnly() { return false; } /** * Guess the distribution to use for the regularization term * {@link #setC(double) C} in a SVM. * * @param d the data set to get the guess for * @return the guess for the C parameter in the SVM */ public static Distribution guessC(DataSet d) { return new LogUniform(1e-1, 100); } }
45,251
30.822785
398
java
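PlattSMO above covers both the classification SMO of Platt/Keerthi et al. and the regression SMO of Shevade et al., selected by which train overload is called. The sketch below is hypothetical usage, not original source; PlattSMOExample, the dataset variables, and all hyperparameter values are assumptions for illustration.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.svm.PlattSMO;
import jsat.classifiers.svm.SupportVectorLearner.CacheMode;
import jsat.distributions.kernels.RBFKernel;
import jsat.regression.RegressionDataSet;

public class PlattSMOExample
{
    public static PlattSMO fitClassifier(ClassificationDataSet train)
    {
        PlattSMO smo = new PlattSMO(new RBFKernel(0.5));
        smo.setC(10);                     //harder margin than the default C = 1
        smo.setCacheMode(CacheMode.FULL); //cache all kernel products if memory allows
        smo.setMaxIterations(10000);      //guards against non-convergence when C is too large
        smo.train(train);
        return smo;
    }

    public static PlattSMO fitRegressor(RegressionDataSet train)
    {
        PlattSMO smo = new PlattSMO(new RBFKernel(0.5));
        smo.setC(10);
        smo.setEpsilon(0.05); //half-width of the epsilon-insensitive tube around the targets
        smo.train(train);
        return smo;
    }
}

When sweeping C, the classifier fit can also be warm-started by passing any BinaryScoreClassifier (including a previous PlattSMO) as the warmSolution argument to train, as the warm-start block in the source shows; guessC(dataSet) gives a starting distribution for that sweep.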
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/SBP.java
package jsat.classifiers.svm; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Random; import java.util.concurrent.ExecutorService; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.DataPoint; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.kernels.KernelTrick; import jsat.exceptions.FailedToFitException; import jsat.exceptions.UntrainedModelException; import jsat.linear.Vec; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.utils.IndexTable; import jsat.utils.ListUtils; import jsat.utils.random.RandomUtil; import jsat.utils.random.XORWOW; /** * Implementation of the Stochastic Batch Perceptron (SBP) algorithm. Despite * its name, it solves the kernelized SVM problem. Because it is done * stochastically, it may not produce Support Vectors that the standard SVM * algorithm learns. It can learn at most one SV per iteration. * * See:<br> * Cotter, A., Shalev-Shwartz, S.,&amp;Srebro, N. (2012). <i>The Kernelized * Stochastic Batch Perceptron</i>. International Conference on Machine * Learning. Learning. Retrieved from <a href="http://arxiv.org/abs/1204.0566"> * here</a> * * @author Edward Raff */ public class SBP extends SupportVectorLearner implements BinaryScoreClassifier, Parameterized { private static final long serialVersionUID = 6112916782260792833L; private double nu = 0.1; private int iterations; private double burnIn = 1.0/5.0; /** * Creates a new SBP SVM learner * @param kernel the kernel to use * @param cacheMode the type of kernel cache to use */ public SBP(KernelTrick kernel, CacheMode cacheMode, int iterations, double v) { super(kernel, cacheMode); setIterations(iterations); setNu(v); } /** * Copy constructor * @param other the object to copy */ protected SBP(SBP other) { this(other.getKernel().clone(), other.getCacheMode(), other.iterations, other.nu); if(other.alphas != null) this.alphas = Arrays.copyOf(other.alphas, other.alphas.length); } @Override public SBP clone() { return new SBP(this); } /** * Sets the number of iterations to go through. At most one SV can be * learned per iteration. If more iterations are done than there are SVs, it * is highly likely that O(n) SVs will be used, making the model very dense. * It may take far fewer iterations of the algorithm than there are data * points to get good accuracy. * @param iterations the number of iterations of the algorithm to perform */ public void setIterations(int iterations) { this.iterations = iterations; } /** * Returns the number of iterations the algorithm will perform * @return the number of iterations the algorithm will perform */ public int getIterations() { return iterations; } /** * The nu parameter for this SVM is not the same as the standard nu-SVM * formulation, though it plays a similar role. It must be in the range * (0, 1), where small values indicate a linearly separable problem (in the * kernel space), and large values mean the problem is less separable. If * the value is too small for the problem, the SVM may fail to converge or * produce good results. * * @param nu the value between (0, 1) */ public void setNu(double nu) { if(Double.isNaN(nu) || nu <= 0 || nu >= 1) throw new IllegalArgumentException("nu must be in the range (0, 1)"); this.nu = nu; } /** * Returns the nu SVM parameter * @return the nu SVM parameter */ public double getNu() { return nu; } /** * Sets the burn in fraction. 
SBP averages the intermediate solutions from * each step as the final solution. The intermediate steps of SBP are highly * correlated, and the begging solutions are usually not as meaningful * toward the converged solution. To overcome this issue a certain fraction * of the iterations are not averaged into the final solution, making them * the "burn in" fraction. A value of 0.25 would then be ignoring the * initial 25% of solutions. * @param burnIn the ratio int [0, 1) initial solutions to ignore */ public void setBurnIn(double burnIn) { if(Double.isNaN(burnIn) || burnIn < 0 || burnIn >= 1) throw new IllegalArgumentException("BurnInFraction must be in [0, 1), not " + burnIn); this.burnIn = burnIn; } /** * * @return the burn in fraction */ public double getBurnIn() { return burnIn; } @Override public CategoricalResults classify(DataPoint data) { if(vecs == null) throw new UntrainedModelException("Classifier has yet to be trained"); CategoricalResults cr = new CategoricalResults(2); double sum = getScore(data); //SVM only says yess / no, can not give a percentage if(sum < 0) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { return kEvalSum(dp.getNumericalValues()); } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { if(dataSet.getClassSize() != 2) throw new FailedToFitException("SBP supports only binary classification"); final int n = dataSet.size(); /** * First index where we start summing for the average */ final int T_0 = (int) Math.min((burnIn*iterations), iterations-1); /* * Respone values */ double[] C = new double[n]; double[] CSum = new double[n]; alphas = new double[n]; double[] alphasSum = new double[n]; double[] y = new double[n]; vecs = new ArrayList<Vec>(n); for(int i = 0; i < n; i++) { y[i] = dataSet.getDataPointCategory(i)*2-1; vecs.add(dataSet.getDataPoint(i).getNumericalValues()); } setCacheMode(getCacheMode());//Initiates the cahce Random rand = RandomUtil.getRandom(); double maxKii = 0; for(int i = 0; i < n; i++) maxKii = Math.max(maxKii, kEval(i, i)); final double eta_0 = 1/Math.sqrt(maxKii); double rSqrd = 0; for(int t = 1; t <= iterations; t++) { final double eta = eta_0/Math.sqrt(t); final double gamma = findGamma(C, n*nu); int i; i = sampleC(rand, n, C, gamma); alphas[i] += eta; rSqrd = updateLoop(rSqrd, eta, C, i, y, n); rSqrd = projectionStep(rSqrd, n, C); if(t >= T_0) for(int j = 0; j < n; j++) { alphasSum[j] += alphas[j]; CSum[j] += C[j]; } } //Take the averages for (int j = 0; j < n; j++) { alphas[j] = alphasSum[j]/(iterations-T_0); C[j] = CSum[j]/(iterations-T_0); } double gamma = findGamma(C, n*nu); for (int j = 0; j < n; j++) alphas[j] /= gamma; //Clean up to only the SVs int supportVectorCount = 0; for(int i = 0; i < vecs.size(); i++) if(alphas[i] != 0)//its a support vector { ListUtils.swap(vecs, supportVectorCount, i); alphas[supportVectorCount++] = alphas[i]*y[i]; } vecs = new ArrayList<Vec>(vecs.subList(0, supportVectorCount)); alphas = Arrays.copyOfRange(alphas, 0, supportVectorCount); it = null; setCacheMode(null); setAlphas(alphas); } private double projectionStep(double rSqrd, final int n, double[] C) { if(rSqrd > 1)//1^2 = 1, so jsut use sqrd version { final double rInv = 1/Math.sqrt(rSqrd); for(int j = 0; j < n; j++) { C[j] *= rInv; alphas[j] *= rInv; } rSqrd = 1; } return rSqrd; } private int sampleC(Random rand, final int n, double[] C, final double gamma) throws FailedToFitException { 
int i = 0; //Samply uniformly from C[i] <= gamma int attempts = 0;//you get 5 attempts to find one quickly do { i = rand.nextInt(n); attempts++; } while(C[i] > gamma && attempts < 5); if(C[i] > gamma)//find one the slow way { int candidates = 0; for(int j = 0; j < C.length; j++) { if(C[j] < gamma) candidates++; } if(candidates == 0) throw new FailedToFitException("BUG: please report"); int randCand = rand.nextInt(candidates); i = 0; for(int j = 0; j < C.length && i < randCand; j++) if(C[i] < gamma) i++; } return i; } private double updateLoop(double rSqrd, final double eta, double[] C, int i, double[] y, final int n) { rSqrd += 2*eta*C[i]+eta*eta*kEval(i, i); final double y_i = y[i]; for(int j = 0; j < n; j++) C[j] += eta*y_i*y[j]*kEval(i, j); return rSqrd; } @Override public boolean supportsWeightedData() { return false; } private IndexTable it; //TODO add bias version of findGamma private double findGamma(double[] C, double d) { if(it == null ) it = new IndexTable(C); else it.sort(C);//few will change from iteration to iteration, Java's TimSort should be able to exploit this double sum = 0; double max; double finalScore = 0, prevScore = 0; int i; for(i = 0; i < it.length(); i++) { max = C[it.index(i)]; sum += max; double score = max*i-sum; prevScore = finalScore; finalScore = (d-max*i+sum)/i+max; if(score >= d) break; } return prevScore; } }
10,853
28.900826
116
java
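SBP above learns at most one support vector per iteration and averages the post-burn-in iterates into the final coefficients. A hypothetical usage sketch follows; SBPExample, train, and the nu, burn-in, and iteration values are illustrative assumptions.

import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.svm.SBP;
import jsat.classifiers.svm.SupportVectorLearner.CacheMode;
import jsat.distributions.kernels.RBFKernel;

public class SBPExample
{
    public static SBP fit(ClassificationDataSet train)
    {
        //at most one SV can be added per iteration, so the iteration count also caps model density
        int iters = Math.min(train.size(), 2000);
        //small nu: the problem is assumed to be close to separable in the kernel space
        SBP sbp = new SBP(new RBFKernel(0.5), CacheMode.FULL, iters, 0.05);
        sbp.setBurnIn(0.25); //ignore the first 25% of iterates when averaging the solution
        sbp.train(train);
        return sbp;
    }
}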
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/SVMnoBias.java
/* * Copyright (C) 2016 Edward Raff * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package jsat.classifiers.svm; import java.util.concurrent.ExecutorService; import jsat.classifiers.CategoricalResults; import jsat.classifiers.ClassificationDataSet; import jsat.classifiers.DataPoint; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.kernels.KernelTrick; import jsat.exceptions.UntrainedModelException; import jsat.linear.Vec; import static java.lang.Math.*; import java.util.Arrays; import jsat.distributions.kernels.NormalizedKernel; import jsat.utils.concurrent.AtomicDouble; import jsat.utils.concurrent.ParallelUtils; /** * This class implements a version of the Support Vector Machine without a bias * term. In addition, the current implementation requires that the Kernel Trick * used be a {@link KernelTrick#normalized() normalized} kernel. If the given * kernel is not normalized, this class will automatically wrap it to become * normalized.<br> * <br> * Because there is no bias term, this class should never be used with the * Linear kernel. But for the more common RBF kernel the lack of bias term * should have minimal impact on accuracy.<br> * <br> * See: Steinwart, I., Hush, D., & Scovel, C. (2011). <i>Training SVMs Without * Offset</i>. The Journal of Machine Learning Research, 12, 141–202. * * @author Edward Raff */ public class SVMnoBias extends SupportVectorLearner implements BinaryScoreClassifier { private double C = 1; private double tolerance = 1e-3; /** * Stores the true label value (-1 or +1) of the data point */ protected short[] label; /** * Weight values to apply to each data point */ protected Vec weights; //Variables used during training private double T_a; private double S_a; /** * Creates a new SVM object that uses no cache mode. 
* * @param kf the kernel trick to use */ public SVMnoBias(KernelTrick kf) { super(kf, SupportVectorLearner.CacheMode.NONE); } public SVMnoBias(SVMnoBias toCopy) { super(toCopy); if(toCopy.weights != null) this.weights = toCopy.weights.clone(); if(toCopy.label != null) this.label = Arrays.copyOf(toCopy.label, toCopy.label.length); this.C = toCopy.C; this.tolerance = toCopy.tolerance; } @Override public void setKernel(KernelTrick kernel) { if(kernel.normalized()) super.setKernel(kernel); else super.setKernel(new NormalizedKernel(kernel)); } @Override public double getScore(DataPoint dp) { return kEvalSum(dp.getNumericalValues()); } @Override public SVMnoBias clone() { return new SVMnoBias(this); } @Override public CategoricalResults classify(DataPoint data) { if (vecs == null) throw new UntrainedModelException("Classifier has yet to be trained"); CategoricalResults cr = new CategoricalResults(2); double sum = getScore(data); if (sum > 0) cr.setProb(1, 1); else cr.setProb(0, 1); return cr; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { bookKeepingInit(dataSet); double[] nabla_W = procedure3_init(); solver_1d(nabla_W, parallel); setCacheMode(null); } /** * * @param dataSet the dataset to train on * @param warm_start Array of initial alpha values to use for support * vectors. The absolute value of the inputs will be used. may be longer * than the number of data points. */ protected void train(ClassificationDataSet dataSet, double[] warm_start) { train(dataSet, warm_start, false); } protected void train(ClassificationDataSet dataSet, double[] warm_start, boolean parallel) { bookKeepingInit(dataSet); for(int i = 0; i < alphas.length; i++) alphas[i] = Math.abs(warm_start[i]); double[] nabla_W = procedure4m_init(parallel); solver_1d(nabla_W, parallel); setCacheMode(null); } private void solver_1d(final double[] nabla_W, boolean parallel) { final int N = alphas.length; final double lambda = 1/(2*C*N); //Algorithm 1 1D-SVM solver while(S_a > tolerance/(2*lambda)) { // System.out.println(S_a + " > " + tolerance/(2*lambda)); //Procedure 1 Calculate i∗ ∈ argmaxi=1,...,n δi · (∇Wi(α)−δi/2) double bestgain = -1; int i_max = -1; double best_delta = -1; for(int i = 0; i < N; i++) { double a_star_i = max(min(weights.get(i)*C, nabla_W[i]+alphas[i]), 0); double delta = a_star_i-alphas[i]; ///gain←δ· (∇Wi(α)−δ/2) double gain = delta*(nabla_W[i]-delta/2); if(gain >= bestgain) { bestgain = gain; i_max = i; best_delta = delta; } } //adjust alhpa alphas[i_max] += best_delta; //fuzzy clip to get hard 0/Cs if(alphas[i_max] + 1e-7 > weights.get(i_max)*C )//round to max alphas[i_max] = weights.get(i_max)*C; else if(alphas[i_max] - 1e-7 < 0)//round to 0 alphas[i_max] = 0; final double delta = best_delta; final int i = i_max; //use Procedure 2 to update ∇W(α) in direction i∗ by δ and calculate S(α) //T(α)←T(α)−δ(2∇Wi(α)−1−δ) T_a -= best_delta*(2*nabla_W[i_max]-1-best_delta); final AtomicDouble E_a = new AtomicDouble(0.0); accessingRow(i);//hint to caching scheme ParallelUtils.run(parallel, N, (start, end) -> { double Ea_delta = 0; for (int j = start; j < end; j++) { nabla_W[j] -= delta * label[i] * label[j] * kEval(i, j); Ea_delta += weights.get(j) * C * min(max(0, nabla_W[j]), 2); } E_a.addAndGet(Ea_delta); }); S_a = T_a + E_a.get(); } accessingRow(-1);//no more row accesses //collapse label into signed alphas for(int i = 0; i < label.length; i++) alphas[i] *= label[i]; } private double[] procedure3_init() { int N = alphas.length; //Procedure 3 Initialize by αi←0 and compute ∇W(α), 
S(α), and T(α). T_a = 0; S_a = 0; double[] nabla_W = new double[N]; for(int i = 0; i < N; i++) { nabla_W[i] = 1; S_a += weights.get(i)*C; } return nabla_W; } private double[] procedure4m_init(boolean parallel) { final int N = alphas.length; //Procedure 3 Initialize by αi←0 and compute ∇W(α), S(α), and T(α). T_a = 0; final AtomicDouble E_a = new AtomicDouble(0.0); final AtomicDouble T_a_accum = new AtomicDouble(0.0); final double[] nabla_W = new double[N]; ParallelUtils.run(parallel, N, (start, end)-> { double Ta_delta = 0; double Ea_delta = 0; for(int i = start; i < end; i++) { nabla_W[i] = 1; double nabla_Wi_delta = 0; for(int j = 0; j < N; j++) { if(alphas[j] == 0) continue; //We call k instead of kEval b/c we are accessing most //of the n^2 values, so nothing will get to stay in //cache. Unless we are using FULL caching, in which //case we will get re-use. //Using k avoids LRU overhead which can be significant //for fast to evaluate datasets double k_ij; if(getCacheMode() == CacheMode.FULL) k_ij = kEval(i, j); else k_ij = k(i, j); nabla_Wi_delta -= alphas[j] * label[i] * label[j] * k_ij; } nabla_W[i] += nabla_Wi_delta; Ta_delta -= alphas[i]*nabla_W[i]; Ea_delta += weights.get(i)*C*min(max(nabla_W[i], 0), 2); } E_a.addAndGet(Ea_delta); T_a_accum.addAndGet(Ta_delta); }); T_a = T_a_accum.get(); S_a = T_a + E_a.get(); return nabla_W; } private void bookKeepingInit(ClassificationDataSet dataSet) { final int N = dataSet.size(); vecs = dataSet.getDataVectors(); weights = dataSet.getDataWeights(); label = new short[N]; for(int i = 0; i < N; i++) label[i] = (short) (dataSet.getDataPointCategory(i)*2-1); setCacheMode(getCacheMode());//Initiates the cache //initialize alphas array to all zero alphas = new double[N];//zero is default value } @Override public boolean supportsWeightedData() { return true; } /** * Sets the complexity parameter of the SVM. The larger the C value, the harder * a margin the SVM will attempt to find. Lower values of C allow for more * misclassification errors. * @param C the soft margin parameter */ // @Parameter.WarmParameter(prefLowToHigh = true) public void setC(double C) { if(C <= 0) throw new ArithmeticException("C must be a positive constant"); this.C = C; } /** * Returns the soft margin complexity parameter of the SVM * @return the complexity parameter of the SVM */ public double getC() { return C; } /** * Sets the tolerance for the solution. Higher values converge to worse * solutions, but do so faster * @param tolerance the tolerance for the solution */ public void setTolerance(double tolerance) { this.tolerance = tolerance; } /** * Returns the solution tolerance * @return the solution tolerance */ public double getTolerance() { return tolerance; } }
11,218
30.42577
94
java
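A short usage sketch for the SVMnoBias class above. It assumes the RBF kernel from jsat.distributions.kernels and an already-loaded ClassificationDataSet; the variable names and parameter values are purely illustrative, not recommendations.

// Hypothetical usage sketch for SVMnoBias (names and values are illustrative).
import jsat.classifiers.CategoricalResults;
import jsat.classifiers.ClassificationDataSet;
import jsat.classifiers.DataPoint;
import jsat.classifiers.svm.SVMnoBias;
import jsat.distributions.kernels.RBFKernel;

class SVMnoBiasExample
{
    static CategoricalResults trainAndClassify(ClassificationDataSet trainData, DataPoint query)
    {
        SVMnoBias svm = new SVMnoBias(new RBFKernel(0.5)); // RBF is already normalized
        svm.setC(10);                 // larger C = harder margin
        svm.setTolerance(1e-3);       // stopping tolerance of the 1D solver
        svm.train(trainData, false);  // single-threaded training
        return svm.classify(query);   // hard 0/1 class assignment
    }
}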
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/SupportVectorLearner.java
package jsat.classifiers.svm; import java.io.Serializable; import java.util.*; import jsat.distributions.kernels.KernelTrick; import jsat.distributions.kernels.LinearKernel; import jsat.linear.Vec; import jsat.parameters.Parameter.ParameterHolder; import jsat.utils.DoubleList; import jsat.utils.ListUtils; import jsat.utils.concurrent.ConcurrentCacheLRU; /** * Base class for support vector style learners. This means that the learner * performs batch training on a fixed set of training points using a * {@link KernelTrick kernel} to project the data into a different space. The * final set of vectors used may or may not be sparse. It does not necessarily * have to be a Support Vector machine. * <br><br> * This class provides caching mechanism to transparently provide faster kernel. * * @author Edward Raff */ public abstract class SupportVectorLearner implements Serializable { static final long serialVersionUID = 210140232301130063L; //Implementation note, NaN is used to indicate a cache value that has not been computed yet. @ParameterHolder private KernelTrick kernel; /** * The array of vectors. In the training phase, this should be the set of * all training vectors. After training, this should contain only the set of * support vectors. */ protected List<Vec> vecs; /** * The array of coefficients associated with each support vector. This * should be instantiated directly when training. When the set of alphas and * support vectors is finalized, {@link #setAlphas(double[]) } should be * called with a reference to itself or the array where the final alphas are * stored. This will initialized any accelerating structures so that * {@link #kEvalSum(jsat.linear.Vec) } can be called. */ protected double[] alphas; private CacheMode cacheMode; /** * Kernel evaluation acceleration cache */ protected List<Double> accelCache = null; private double[][] fullCache; /** * Stores rows of a cache matrix. */ private ConcurrentCacheLRU<Integer, double[]> partialCache; /** * We allow algorithms that know they are going to access a specific row to * hint, and save that row in this object to avoid overhead of hitting the * LRU. See {@link #accessingRow(int) } */ private double[] specific_row_cache_values = null; /** * The row that has been explicitly cached */ private int specific_row_cache_row = -1; /** * Holds an available row for inserting into the cache, null if not * available. All values already set to Nan */ private double[] availableRow; private int cacheConst = 500; /** * Sets the final set of alphas, and indicates that the final accelerating * structures (if available) should be constructed for performing kernel * evaluations against unseen vectors. * <br> * This may be called multiple times in an online scenario, but calls will * involve a re-construction of the whole cache. * * @param alphas the final array of alphas */ protected void setAlphas(double[] alphas) { this.alphas = alphas; accelCache = kernel.getAccelerationCache(vecs); } /** * Determines how the final kernel values are cached. The total number of * raw kernel evaluations can be tracked using {@link #evalCount}<br> * {@link #setCacheMode(jsat.classifiers.svm.SupportVectorLearner.CacheMode) } * should be called before training begins by the training algorithm as * described in the method documentation. */ public enum CacheMode { /** * No kernel value caching will be performed. */ NONE, /** * The entire kernel matrix will be created and cached ahead of time. 
* This is the best option if your data set is small and the kernel * cache can fit into memory. */ FULL, /** * Only the most recently used rows of the kernel matrix will be cached * (LRU). When a call to {@link #k(int, int) } occurs, the first value * will be taken to be the row of the matrix. <br> * Because the kernel matrix is symmetric, if a cache miss occurs - the * column value will be checked for its existence. If the row is * present, it will be used instead. If both rows are not present, then * a new row is inserted for the first index, and another row evicted if * necessary. * <br> * The {@link #cacheEvictions} indicates how many times a row has been * evicted from the cache. * <br> * Row values are computed lazily. */ ROWS }; /** * This constructor is meant manly for Serialization to work. It uses a * linear kernel and no caching. */ protected SupportVectorLearner() { this(new LinearKernel(), CacheMode.NONE); } /** * Creates a new Support Vector Learner * @param kernel the kernel trick to use * @param cacheMode the kernel caching method to use */ public SupportVectorLearner(KernelTrick kernel, CacheMode cacheMode) { this.cacheMode = cacheMode; setKernel(kernel); } /** * Copy constructor * @param toCopy the object to copy */ public SupportVectorLearner(SupportVectorLearner toCopy) { if(toCopy.kernel != null) this.kernel = toCopy.kernel.clone(); if(toCopy.vecs != null) { this.vecs = new ArrayList<Vec>(toCopy.vecs.size()); for(Vec v : toCopy.vecs) this.vecs.add(v.clone()); } if(toCopy.alphas != null) this.alphas = Arrays.copyOf(toCopy.alphas, toCopy.alphas.length); this.cacheMode = toCopy.cacheMode; if(toCopy.accelCache != null) this.accelCache = new DoubleList(toCopy.accelCache); if(toCopy.fullCache != null) { this.fullCache = new double[toCopy.fullCache.length][]; for(int i = 0; i < toCopy.fullCache.length; i++) this.fullCache[i] = Arrays.copyOf(toCopy.fullCache[i], toCopy.fullCache[i].length); } if(toCopy.partialCache != null)//TODO handling this better needs to be done { setCacheMode(cacheMode); // if(toCopy.availableRow != null) // this.availableRow = Arrays.copyOf(toCopy.availableRow, toCopy.availableRow.length); } this.cacheConst = toCopy.cacheConst; } /** * Sets the kernel trick to use * @param kernel the kernel trick to use */ public void setKernel(KernelTrick kernel) { this.kernel = kernel; } /** * Sets the cache value, which may be interpreted differently by different * caching schemes. <br> * This is currently only used for {@link CacheMode#ROWS}, where the value * indicates how many rows will be cached. * * @param cacheValue the cache value to be used */ public void setCacheValue(int cacheValue) { this.cacheConst = cacheValue; } /** * Sets the {@link #setCacheValue(int) cache value} to one that will use the * specified amount of memory. If the amount of memory specified is great * enough, this method will automatically set the * {@link #setCacheMode(jsat.classifiers.svm.SupportVectorLearner.CacheMode) * cache mode} to {@link CacheMode#FULL}. * * @param N the number of data points * @param bytes the number of bytes of memory to make the cache */ public void setCacheSize(long N, long bytes) { int DS = Double.SIZE/8; bytes /= DS;//Gets the total number of doubles we can store if(bytes > N*N/2) setCacheMode(CacheMode.FULL); else//How many rows can we handle? 
{ //guessing 2 work overhead for object header + one pointer reference to the array, asusming 64 bit long bytesPerRow = N*DS+3*Long.SIZE/8; int rows = (int) Math.min(Math.max(1, bytes/bytesPerRow), Integer.MAX_VALUE); if(rows > 25) setCacheValue(rows); else//why bother? just use NONE setCacheMode(CacheMode.NONE); } } /** * Returns the current cache value * @return the current cache value */ public int getCacheValue() { return cacheConst; } /** * Returns the current caching mode in use * @return the current caching mode in use */ public CacheMode getCacheMode() { return cacheMode; } /** * Calling this sets the method of caching that will be used. <br> * This is called called by the implementing class to initialize and clear * the caches. Calling this with the current cache mode will initialize the * caches. Once training is complete, call again with {@code null} to * deinitialize the caches. * * @param cacheMode */ public void setCacheMode(CacheMode cacheMode) { if(cacheMode == null) { fullCache = null; partialCache = null; availableRow = null; return; } this.cacheMode = cacheMode; if(vecs != null) accelCache = kernel.getAccelerationCache(vecs); evalCount = 0; cacheEvictions = 0; final int N = vecs == null ? 0 : vecs.size(); if(cacheMode == CacheMode.FULL && vecs != null) { fullCache = new double[N][]; for(int i = 0; i < N; i++) { fullCache[i] = new double[N-i]; Arrays.fill(fullCache[i], Double.NaN); } //Switched to lazy init, hence NaN above // for(int i = 0; i < N; i++) // for(int j = i; j < N; j++) // fullCache[i][j-i] = k(i, j); } else if(cacheMode == CacheMode.ROWS && vecs != null) { partialCache = new ConcurrentCacheLRU<Integer, double[]>(cacheConst); } else if(cacheMode == CacheMode.NONE) fullCache = null; } protected int evalCount = 0; protected int cacheEvictions = 0; public KernelTrick getKernel() { return kernel; } /** * Performs a summation of the form <br> * <big>&#8721;</big> &alpha;<sub>i</sub> k(x<sub>i</sub>, y) <br> * for each support vector and associated alpha value currently stored in * the support vector machine. It is not necessary to call * {@link #setAlphas(double[]) } before calling this, but kernel evaluations * may be slower if this is not done. * @param y the vector to perform the kernel product sum against * @return the sum of the scaled kernel products */ protected double kEvalSum(Vec y) { if (alphas == null) throw new RuntimeException("alphas have not been set"); return kernel.evalSum(vecs, accelCache, alphas, y, 0, alphas.length); } /** * Performs a kernel evaluation of the product between two vectors directly. * This is the slowest way to do a kernel evaluation, and should be avoided * unless there is a specific reason to do so. * <br> * These evaluations will not be counted in {@link #evalCount} * @param a the first vector * @param b the second vector * @return the kernel evaluation of k(a, b) */ protected double kEval(Vec a, Vec b) { return kernel.eval(a, b); } /** * Performs a kernel evaluation of the a'th and b'th vectors in the * {@link #vecs} array. 
* * @param a the first vector index * @param b the second vector index * @return the kernel evaluation of k(a, b) */ protected double kEval(int a, int b) { if(cacheMode == CacheMode.FULL) { if(a > b) { int tmp = a; a = b; b = tmp; } double val = fullCache[a][b-a]; if(Double.isNaN(val))//lazy init return fullCache[a][b-a] = k(a, b); return val; } else if(cacheMode == CacheMode.ROWS) { double[] cache; if(specific_row_cache_row == a) cache = specific_row_cache_values; else cache = partialCache.get(a); if (cache == null)//not present { //make a row cache = new double[vecs.size()]; Arrays.fill(cache, Double.NaN); double[] cache_missed = partialCache.putIfAbsentAndGet(a, cache); if(cache_missed != null) cache = cache_missed; if (Double.isNaN(cache[b])) return cache[b] = k(a, b); else return cache[b]; } } return k(a, b); } /** * This method allows the caller to hint that they are about to access many * kernel values for a specific row. The row may be selected out from the * cache into its own location to avoid excess LRU overhead. Giving a * negative index indicates that we are done with the row, and removes it. * This method may be called multiple times with different row values. But * when done accessing a specific row, a negative value should be passed in. * * * @param r the row to cache explicitly to avoid LRU overhead. Or a negative * value to indicate that we are done with any specific row. */ protected void accessingRow(int r) { if (r < 0) { specific_row_cache_row = -1; specific_row_cache_values = null; return; } if(cacheMode == CacheMode.ROWS) { double[] cache = partialCache.get(r); if (cache == null)//not present { //make a row cache = new double[vecs.size()]; Arrays.fill(cache, Double.NaN); double[] cache_missed = partialCache.putIfAbsentAndGet(r, cache); if(cache_missed != null) cache = cache_missed; } specific_row_cache_values = cache; specific_row_cache_row = r; } } /** * Internal kernel eval source. Only call directly if you KNOW you will not * be re-using the resulting value and intentionally wish to skip the * caching system * * @param a the first vector index * @param b the second vector index * @return the kernel evaluation of k(a, b) */ protected double k(int a, int b) { evalCount++; return kernel.eval(a, b, vecs, accelCache); } /** * Sparsifies the SVM by removing the vectors with &alpha; = 0 from the * dataset. */ protected void sparsify() { final int N = vecs.size(); int accSize = accelCache == null ? 0 : accelCache.size()/N; int svCount = 0; for(int i = 0; i < N; i++) if(alphas[i] != 0)//Its a support vector { ListUtils.swap(vecs, svCount, i); if(accelCache != null) for(int j = i*accSize; j < (i+1)*accSize; j++) ListUtils.swap(accelCache, svCount*accSize+j-i*accSize, j); alphas[svCount++] = alphas[i]; } vecs = new ArrayList<Vec>(vecs.subList(0, svCount)); alphas = Arrays.copyOfRange(alphas, 0, svCount); } }
15,832
32.402954
110
java
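The CacheMode.FULL path in SupportVectorLearner above stores only the upper triangle of the symmetric kernel matrix, fills entries lazily, and marks uncomputed values with NaN. The following self-contained sketch shows that indexing scheme in isolation; it is an illustration of the idea, not JSAT's internal class.

// Sketch of the lazily-filled, upper-triangular kernel cache layout:
// only k(i, j) for j >= i is stored, at cache[i][j - i], and lookups swap
// their arguments to exploit the symmetry k(a, b) == k(b, a).
abstract class TriangularKernelCache
{
    private final double[][] cache;

    TriangularKernelCache(int n)
    {
        cache = new double[n][];
        for (int i = 0; i < n; i++)
        {
            cache[i] = new double[n - i];
            java.util.Arrays.fill(cache[i], Double.NaN); // NaN marks "not yet computed"
        }
    }

    /** the raw (uncached) kernel evaluation */
    abstract double kernel(int i, int j);

    /** symmetric, lazily-filled lookup */
    double kEval(int a, int b)
    {
        if (a > b) { int t = a; a = b; b = t; } // keep a <= b so we stay in the stored triangle
        double v = cache[a][b - a];
        if (Double.isNaN(v))
            v = cache[a][b - a] = kernel(a, b);
        return v;
    }
}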
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/extended/AMM.java
package jsat.classifiers.svm.extended; import java.util.*; import java.util.concurrent.ExecutorService; import jsat.classifiers.ClassificationDataSet; import jsat.linear.Vec; import jsat.utils.IntList; import jsat.utils.ListUtils; import jsat.utils.random.RandomUtil; import jsat.utils.random.XORWOW; /** * This is the batch variant of the Adaptive Multi-Hyperplane Machine (AMM) * algorithm. It is related to linear SVMs where instead of having only a single * weight vector, it is extended to multi-class problems by giving each class * its own weight vector. It is further extended by allowing each class to * dynamically add new weight vectors to increase the non-linearity of the * solution. <br> * This algorithm works best for problems with a very large number of data * points where traditional kernelized SVMs are prohibitively expensive to train * due to computational cost. <br> * While the AMM trained in a batch setting can continue to be updated in an * online fashion, the accuracy may reduce if done. This is because only the * batch variant will reach a local optima.<br> * For this version the {@link #setEpochs(int) } method controls the total * number of iterations of the learning algorithm. A small value in [5, 20] * should be sufficient. * <br> * See: * <ul> * <li>Wang, Z., Djuric, N., Crammer, K., &amp; Vucetic, S. (2011). <i>Trading * representability for scalability Adaptive Multi-Hyperplane Machine for * nonlinear Classification</i>. In Proceedings of the 17th ACM SIGKDD * international conference on Knowledge discovery and data mining - KDD ’11 * (p. 24). New York, New York, USA: ACM Press. doi:10.1145/2020408.2020420</li> * <li>Djuric, N., Lan, L., Vucetic, S., &amp; Wang, Z. (2014). <i>BudgetedSVM: A * Toolbox for Scalable SVM Approximations</i>. Journal of Machine Learning * Research, 14, 3813–3817. Retrieved from * <a href="http://jmlr.org/papers/v14/djuric13a.html">here</a></li> * </ul> * * @author Edward Raff */ public class AMM extends OnlineAMM { private static final long serialVersionUID = -9198419566231617395L; private int subEpochs = 1; /** * Creates a new batch AMM learner */ public AMM() { this(DEFAULT_REGULARIZER); } /** * Creates a new batch AMM learner * @param lambda the regularization value to use */ public AMM(double lambda) { this(lambda, DEFAULT_CLASS_BUDGET); } /** * Creates a new batch AMM learner * @param lambda the regularization value to use * @param classBudget the maximum number of weight vectors for each class */ public AMM(double lambda, int classBudget) { super(lambda, classBudget); setEpochs(10); } /** * Copy constructor * @param toCopy the object to copy */ public AMM(AMM toCopy) { super(toCopy); this.subEpochs = toCopy.subEpochs; } /** * Each iteration of the batch AMM algorithm requires at least one epoch * over the training set. This control how many epochs make up each * iteration of training. 
 * * @param subEpochs the number of passes through the training set done on each * iteration of training */ public void setSubEpochs(int subEpochs) { if(subEpochs < 1) throw new IllegalArgumentException("subEpochs must be positive, not " + subEpochs); this.subEpochs = subEpochs; } /** * Returns the number of passes through the data set done on each iteration * @return the number of passes through the data set done on each iteration */ public int getSubEpochs() { return subEpochs; } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { train(dataSet); } @Override public void train(ClassificationDataSet dataSet) { IntList randOrder = new IntList(dataSet.size()); ListUtils.addRange(randOrder, 0, dataSet.size(), 1); Random rand = RandomUtil.getRandom(); int[] Z = new int[randOrder.size()]; /* * For Algorithm 1, instead of a random assignment, we initialized z(1) * by a single scan of data using Online AMM */ setUp(dataSet.getCategories(), dataSet.getNumNumericalVars(), dataSet.getPredicting()); Collections.shuffle(randOrder, rand); //also perform step 1: initialize z(1) for(int i : randOrder) Z[i] = update(dataSet.getDataPoint(i), dataSet.getDataPointCategory(i), Integer.MIN_VALUE); time = 1;//reset the time since we are "Starting" now, and before was just a better-than-random initial state int outerEpoch = 0; do//2: repeat { /* Solve each sub-problem P(W|z(r)): lines 4 ∼ 7*/ for(int subEpoch = 0; subEpoch < subEpochs; subEpoch++) { Collections.shuffle(randOrder, rand); for(int i : randOrder) Z[i] = update(dataSet.getDataPoint(i), dataSet.getDataPointCategory(i), Z[i]);//only changing value in certain valid cases } // 8: compute z(++r) using (9); /* Reassign z */ int changed = 0; for(int i = 0; i < randOrder.size(); i++) { Vec x_t = dataSet.getDataPoint(i).getNumericalValues(); double z_t_val = 0.0;//implicit zero weight vectors, so the max is always at least 0 int z_t = -1;//negative value used to indicate the implicit was largest Map<Integer, Vec> w_yt = weightMatrix.get(dataSet.getDataPointCategory(i)); for(Map.Entry<Integer, Vec> w_yt_entry : w_yt.entrySet()) { Vec v = w_yt_entry.getValue(); double tmp = x_t.dot(v); if(tmp >= z_t_val) { z_t = w_yt_entry.getKey(); z_t_val = tmp; } } if(Z[i] != z_t) { changed++; Z[i] = z_t; } } if(changed == 0) break; } while(++outerEpoch < getEpochs()); } @Override public AMM clone() { return new AMM(this); } }
6,500
33.764706
142
java
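The reassignment step in AMM.train above picks, for each point, the weight vector of its own class with the largest dot product, where an implicit all-zero weight vector guarantees the maximum is never below zero. A minimal stand-alone sketch of that rule follows; it uses plain arrays instead of JSAT's Vec and the names are illustrative.

// Illustrative sketch (not JSAT API) of the AMM reassignment rule.
class AmmReassign
{
    /** @return index of the best weight vector, or -1 for the implicit zero weight vector */
    static int reassign(double[][] classWeights, double[] x)
    {
        int best = -1;        // -1 means the implicit zero weight vector "won"
        double bestVal = 0.0; // dot(x, 0) = 0 is always available as a baseline
        for (int k = 0; k < classWeights.length; k++)
        {
            double dot = 0;
            for (int d = 0; d < x.length; d++)
                dot += classWeights[k][d] * x[d];
            if (dot >= bestVal)
            {
                best = k;
                bestVal = dot;
            }
        }
        return best;
    }
}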
JSAT
JSAT-master/JSAT/src/jsat/classifiers/svm/extended/CPM.java
/* * Copyright (C) 2017 Edward Raff <[email protected]> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package jsat.classifiers.svm.extended; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutorService; import jsat.DataSet; import jsat.classifiers.*; import jsat.classifiers.calibration.BinaryScoreClassifier; import jsat.distributions.Distribution; import jsat.distributions.LogUniform; import jsat.distributions.Uniform; import jsat.exceptions.FailedToFitException; import jsat.linear.*; import jsat.parameters.Parameter; import jsat.parameters.Parameterized; import jsat.utils.FakeExecutor; import jsat.utils.IntList; import jsat.utils.ListUtils; /** * This class implements the Convex Polytope Machine (CPM), which is an * extension of the Linear SVM. It is a binary classifier that has training time * proportionate to the linear case, but can obtain accuracies closer to that of * a kernelized SVM.<br> * Similar to the {@link AMM AMM} classifier, CPM uses multiple linear * hyper-planes to create a non-linear classifier. Increasing the number of * hyper-planes increases training/prediction time, but also increases the * amount of non-linearity the model can tolerate.<br> * <br> * While the CPM implements the {@link BinaryScoreClassifier} interface, the CPM * decision algorithm does not completely lend itself to producing a natural * score in this manner. For this reason you may observe unusual behavior from * the CPM if you rely on this interface, compared with other approaches. * * * <br>See: Kantchelian, A., Tschantz, M. C., Huang, L., Bartlett, P. L., * Joseph, A. D., & Tygar, J. D. (2014). Large-margin Convex Polytope Machine. * In Proceedings of the 27th International Conference on Neural Information * Processing Systems (pp. 3248–3256). Cambridge, MA, USA: MIT Press. Retrieved * from <a href="http://dl.acm.org/citation.cfm?id=2969033.2969189">here</a> * @author Edward Raff <[email protected]> */ public class CPM implements BinaryScoreClassifier, Classifier, Parameterized { private static final long serialVersionUID = 3171068484917637037L; private int epochs; private double lambda; private int K; private double entropyThreshold; private double h; private Matrix Wp; private Matrix Wn; private Vec bp; private Vec bn; /** * Creates a new CPM classifier, with default parameters that should work * well for most cases. */ public CPM() { this(1.0); } /** * Creates a new CPM classifier * @param K the number of hyper-planes to learn with. */ public CPM(int K) { this(1.0, K); } /** * Creates a new CPM classifier * @param lambda the regularization parameter */ public CPM(double lambda) { this(lambda, 16); } /** * Creates a new CPM classifier * @param lambda the regularization parameter * @param K the number of hyper-planes to learn with. 
*/ public CPM(double lambda, int K) { this(lambda, K, 3.0); } /** * Creates a new CPM classifier * @param lambda the regularization parameter * @param K the number of hyper-planes to learn with. * @param entropyThreshold the parameter that encourages non-linearity to be exploited */ public CPM(double lambda, int K, double entropyThreshold) { this(lambda, K, entropyThreshold, 50); } /** * Creates a new CPM classifier * @param lambda the regularization parameter * @param K the number of hyper-planes to learn with. * @param entropyThreshold the parameter that encourages non-linearity to be exploited * @param epochs the number of training iterations */ public CPM(double lambda, int K, double entropyThreshold, int epochs) { setEpochs(epochs); setLambda(lambda); setK(K); setEntropyThreshold(entropyThreshold); } /** * Copy constructor * @param toCopy the object to copy */ public CPM(CPM toCopy) { this.epochs = toCopy.epochs; this.lambda = toCopy.lambda; this.K = toCopy.K; this.entropyThreshold = toCopy.entropyThreshold; this.h = toCopy.h; if(toCopy.Wp != null) this.Wp = toCopy.Wp.clone(); if(toCopy.Wn != null) this.Wn = toCopy.Wn.clone(); if(toCopy.bp != null) this.bp = toCopy.bp.clone(); if(toCopy.bn != null) this.bn = toCopy.bn.clone(); } /** * Sets the entropy threshold used for training. It ensures a diversity of * hyper-planes are used, where larger values encourage using more of the * hyper planes.<br> * <br> * This method is adjusted from the paper's definition so that the input can * be any non-negative value. It is recommended to try values in the range * of [0, 10] * * @param entropyThreshold the non-negative parameter for hyper-plane diversity */ public void setEntropyThreshold(double entropyThreshold) { if(entropyThreshold < 0 || Double.isNaN(entropyThreshold) || Double.isInfinite(entropyThreshold)) throw new IllegalArgumentException("Entropy threshold must be non-negative, not " + entropyThreshold); this.entropyThreshold = entropyThreshold; set_h_properly(); } private void set_h_properly() { h = Math.log(entropyThreshold * K / 10.0)/Math.log(2); if(h <= 0) h = 0; } /** * * @return the non-negative parameter for hyper-plane diversity */ public double getEntropyThreshold() { return entropyThreshold; } /** * Sets the regularization parameter &lambda; to use. Larger values penalize * model complexity. This value is adjusted from the form in the original * paper so that you do not need to consider the number of epochs * explicitly. The effective regularization will be divided by the total * number of training updates. * * @param lambda the regularization parameter value to use, the recommended * range range is (0, 10<sup>4</sup>] */ public void setLambda(double lambda) { this.lambda = lambda; } /** * * @return the regularization parameter value */ public double getLambda() { return lambda; } /** * Sets the number of hyper planes to use when training. A normal linear * model is equivalent to using only 1 hyper plane. The more hyper planes * used, the more modeling capacity the algorithm has, but the slower it * will run. * * @param K the number of hyper planes to use. */ public void setK(int K) { this.K = K; set_h_properly(); } /** * * @return the number of hyper planes to use. 
 */ public int getK() { return K; } /** * Sets the number of whole iterations through the training set that will be * performed for training * @param epochs the number of whole iterations through the data set */ public void setEpochs(int epochs) { if(epochs < 1) throw new IllegalArgumentException("epochs must be a positive value"); this.epochs = epochs; } /** * Returns the number of epochs used for training * @return the number of epochs used for training */ public int getEpochs() { return epochs; } @Override public CategoricalResults classify(DataPoint data) { Vec x = data.getNumericalValues(); double pos_score = Wp.multiply(x).add(bp).max(); double neg_score = Wn.multiply(x).add(bn).max(); CategoricalResults cr = new CategoricalResults(2); if(neg_score > 0 && pos_score > 0)//ambiguous case, let's go with the larger magnitude { if(neg_score > pos_score) cr.setProb(0, 1.0); else cr.setProb(1, 1.0); } else if(neg_score > 0) cr.setProb(0, 1.0); else if(pos_score > 0) cr.setProb(1, 1.0); else if(neg_score > pos_score )//not exactly how it is described in the paper, but this case is ambiguous - so let's use the larger score to tie break //ambiguous b/c if no model claims ownership, we get a score of 0 cr.setProb(0, 1.0); else cr.setProb(1, 1.0); return cr; } @Override public double getScore(DataPoint dp) { Vec x = dp.getNumericalValues(); double pos_score = Wp.multiply(x).add(bp).max(); double neg_score = Wn.multiply(x).add(bn).max(); return pos_score-neg_score; } @Override public boolean supportsWeightedData() { return false; } /** * * @param dots dot product between the input and each of the k hyper planes * @param owned a count of how many data points are assigned to each hyper plane * @param assignments maps each data point to the hyper plane that owns it. May have negative values for points not yet assigned * @param assigned_positive_instances the number of <b>positive</b> instances that have been assigned to a hyper plane */ private int ASSIGN(Vec dots, int indx, int k_true_max, int[] owned, int[] assignments, int assigned_positive_instances) { //Done outside this function // int k_true_max = 0; // for(int i = 1; i < dots.length(); i++) // if(dots.get(i) > dots.get(k_true_max)) // k_true_max = i; int old_owner = assignments[indx]; double cur_entropy = 0; double new_entropy = Double.POSITIVE_INFINITY; int max_owned = 0; if(assigned_positive_instances > K*10)//we have enough assignments to start estimating entropy { new_entropy = 0; for(int i = 0; i < K; i++) { max_owned = Math.max(max_owned, owned[i]);//used later // double p_i = owned[i]/(double)assigned_positive_instances; double numer = owned[i]; double denom = assigned_positive_instances; if(numer > 0 ) // cur_entropy += -p_i * Math.log(p_i)/Math.log(2); cur_entropy += -numer*(Math.log(numer)-Math.log(denom))/(Math.log(2)*denom); //now calculate for new_entropy if(old_owner < 0)//every term gets a different value, b/c the denominator changes { denom++; if(i == k_true_max)//numer changes here too numer++; if(numer > 0 ) new_entropy += -numer*(Math.log(numer)-Math.log(denom))/(Math.log(2)*denom); } else if(old_owner == k_true_max)//no change in ownership, means no change in entropy { new_entropy = cur_entropy; } else//change in ownership, denom remains the same, numer may change { if(i == k_true_max) numer++; else if(i == old_owner) numer--; if(numer > 0 ) new_entropy += -numer*(Math.log(numer)-Math.log(denom))/(Math.log(2)*denom); } } new_entropy += cur_entropy;//new was calculated as a delta from cur, so by adding we get the correct value } if(new_entropy >= h)//if 
ENTROPY(UNADJ +(x, kunadj)) ≥ h then return k_true_max; //else //find max that would result in an increase in entropy int k_inc_max = 0; if (old_owner >= 0)//don't need to compute entropy, moving to any position that owns fewer would increase entropy { for (int i = 1; i < dots.length(); i++) if (owned[old_owner] > owned[i] && dots.get(i) > dots.get(k_inc_max)) k_inc_max = i; } else//not assigned, assign to anyone that owns less than the most to improve { double best_score = Double.NEGATIVE_INFINITY; for (int i = 1; i < dots.length(); i++) if (max_owned > owned[i] && dots.get(i) > best_score) { k_inc_max = i; best_score = dots.get(i); } if(Double.isInfinite(best_score))//why couldn't we find someone? Bail out return k_true_max;//Let's just give the original max } return k_inc_max; } /** * Training procedure that can be applied to each version of the CPM * sub-problem. * * @param D the dataset to train on * @param W the weight matrix of vectors to use * @param b a vector that stores the associated bias terms for each weight * vector. * @param sign_mul Either positive or negative 1. Controls whether the * positive or the negative class is to be enveloped by the polytope */ private void sgdTrain(ClassificationDataSet D, MatrixOfVecs W, Vec b, int sign_mul, boolean parallel) { IntList order = new IntList(D.size()); ListUtils.addRange(order, 0, D.size(), 1); final double lambda_adj = lambda/(D.size()*epochs); int[] owned = new int[K];//how many points does this hyper plane own? int assigned_positive_instances = 0;//how many points in the positive class have been assigned? int[] assignments = new int[D.size()];//who owns each data point Arrays.fill(assignments, -1);//Starts out that no one is assigned! Vec dots = new DenseVector(W.rows()); long t = 0; for(int epoch = 0; epoch < epochs; epoch++) { Collections.shuffle(order); for(int i : order) { t++; double eta = 1/(lambda_adj*t); Vec x_i = D.getDataPoint(i).getNumericalValues(); int y_i = (D.getDataPointCategory(i)*2-1)*sign_mul; //this sets dots = bias, which we then add to with a matrix-vector product //result is the same as dots = W x_i + b b.copyTo(dots); W.multiply(x_i, 1.0, dots); if(y_i == -1) { for(int k = 0; k < K; k++) if(dots.get(k) > -1) { W.getRowView(k).mutableSubtract(eta, x_i); b.increment(k, -eta); } } else//y_i == 1 { int k_true_max = 0; for(int k = 1; k < dots.length(); k++) if(dots.get(k) > dots.get(k_true_max)) k_true_max = k; if(dots.get(k_true_max) < 1) { int z = ASSIGN(dots, i, k_true_max, owned, assignments, assigned_positive_instances); W.getRowView(z).mutableAdd(eta, x_i); b.increment(z, eta); //book keeping if(assignments[i] < 0)//first assignment, inc counter assigned_positive_instances++; else//change of owner, decrement the old ownership count owned[assignments[i]]--; owned[z]++; assignments[i] = z; } } // W.mutableMultiply(1-eta*lambda); //equivalent form, more stable W.mutableMultiply(1-1.0/t); b.mutableMultiply(1-1.0/t); } } } @Override public void train(ClassificationDataSet dataSet, boolean parallel) { if(dataSet.getPredicting().getNumOfCategories() > 2) throw new FailedToFitException("CPM is a binary classifier, it can not be trained on a dataset with " + dataSet.getPredicting().getNumOfCategories() + " classes"); final int d = dataSet.getNumNumericalVars(); List<Vec> Wv_p = new ArrayList<Vec>(K); List<Vec> Wv_n = new ArrayList<Vec>(K); bp = new DenseVector(K); bn = new DenseVector(K); for(int i = 0; i < K; i++) { Wv_p.add(new ScaledVector(new DenseVector(d))); Wv_n.add(new ScaledVector(new DenseVector(d))); } MatrixOfVecs W_p = new 
MatrixOfVecs(Wv_p); MatrixOfVecs W_n = new MatrixOfVecs(Wv_n); sgdTrain(dataSet, W_p, bp, +1, parallel); sgdTrain(dataSet, W_n, bn, -1, parallel); this.Wp = new DenseMatrix(W_p); this.Wn = new DenseMatrix(W_n); } @Override public CPM clone() { return new CPM(this); } /** * Provides a distribution of reasonable values for the * {@link #setLambda(double) &lambda;} parameter * * @param d the dataset to get the guess for * @return the distribution to search this parameter from */ public static Distribution guessLambda(DataSet d) { return new LogUniform(1e-1, 1e4); } /** * Provides a distribution of reasonable values for the {@link #setEntropyThreshold(double) * } parameter * * @param d the dataset to get the guess for * @return the distribution to search this parameter from */ public static Distribution guessEntropyThreshold(DataSet d) { return new Uniform(1e-1, 10); } }
18,813
33.90538
175
java
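The CPM classify and getScore methods above reduce to a simple rule: take the best response among the positive-class hyperplanes and the best among the negative-class hyperplanes, and compare the two. The sketch below restates that decision rule with plain arrays; the class and method names are illustrative and not part of JSAT.

// Minimal sketch of the CPM decision rule: score = max_k(Wp_k . x + bp_k) - max_k(Wn_k . x + bn_k)
class CpmDecision
{
    static double score(double[][] Wp, double[] bp, double[][] Wn, double[] bn, double[] x)
    {
        return maxPlane(Wp, bp, x) - maxPlane(Wn, bn, x);
    }

    // best (largest) response among one polytope's hyperplanes, bias included
    private static double maxPlane(double[][] W, double[] b, double[] x)
    {
        double best = Double.NEGATIVE_INFINITY;
        for (int k = 0; k < W.length; k++)
        {
            double dot = b[k];
            for (int d = 0; d < x.length; d++)
                dot += W[k][d] * x[d];
            best = Math.max(best, dot);
        }
        return best;
    }
}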