repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/MaxEntL1Trainer.java
package cc.mallet.classify; import cc.mallet.optimize.Optimizer; import cc.mallet.optimize.OrthantWiseLimitedMemoryBFGS; import cc.mallet.types.InstanceList; public class MaxEntL1Trainer extends MaxEntTrainer { private static final long serialVersionUID = 1L; double l1Weight = 1.0; public MaxEntL1Trainer() { super(Double.MAX_VALUE); } public MaxEntL1Trainer(double l1wt) { super(Double.MAX_VALUE); this.l1Weight = l1wt; } public MaxEntL1Trainer(MaxEnt initClassifier) { super(initClassifier); this.gaussianPriorVariance = Double.MAX_VALUE; } public Optimizer getOptimizer() { if (optimizer == null && optimizable != null) optimizer = new OrthantWiseLimitedMemoryBFGS(optimizable, l1Weight); return optimizer; } // commented by Limin Yao, use L1 regularization instead public Optimizer getOptimizer(InstanceList trainingSet) { if (trainingSet != this.trainingSet || optimizable == null) { getOptimizable(trainingSet); optimizer = null; } if (optimizer == null) optimizer = new OrthantWiseLimitedMemoryBFGS(optimizable, l1Weight); return optimizer; } }
1,108
24.790698
71
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/C45.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; import java.util.logging.Logger; import cc.mallet.classify.Boostable; import cc.mallet.classify.Classification; import cc.mallet.classify.Classifier; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.FeatureVector; import cc.mallet.types.GainRatio; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.util.MalletLogger; import cc.mallet.util.Maths; /** * A C4.5 Decision Tree classifier. * * @see C45Trainer * @author Gary Huang <a href="mailto:[email protected]">[email protected]</a> */ public class C45 extends Classifier implements Boostable, Serializable { private static Logger logger = MalletLogger.getLogger(C45.class.getName()); Node m_root; public C45 (Pipe instancePipe, C45.Node root) { super (instancePipe); m_root = root; } public Node getRoot () { return m_root; } private Node getLeaf (Node node, FeatureVector fv) { if (node.getLeftChild() == null && node.getRightChild() == null) return node; else if (fv.value(node.getGainRatio().getMaxValuedIndex()) <= node.getGainRatio().getMaxValuedThreshold()) return getLeaf(node.getLeftChild(), fv); else return getLeaf(node.getRightChild(), fv); } public Classification classify (Instance instance) { FeatureVector fv = (FeatureVector) instance.getData (); assert (instancePipe == null || fv.getAlphabet () == this.instancePipe.getDataAlphabet ()); Node leaf = getLeaf(m_root, fv); return new Classification (instance, this, leaf.getGainRatio().getBaseLabelDistribution()); } /** * Prune the tree using minimum description length */ public void prune() { getRoot().computeCostAndPrune(); } /** * @return the total number of nodes in this tree */ public int getSize() { Node root = getRoot(); if (root == null) return 0; return 1+root.getNumDescendants(); } /** * Prints the tree */ public void print() { if (getRoot() != null) getRoot().print(); } public static class Node implements Serializable { private static final long serialVersionUID = 1L; GainRatio m_gainRatio; // the entire set of instances given to the root node InstanceList m_ilist; // indices of instances at this node int[] m_instIndices; // data vocabulary Alphabet m_dataDict; // mininum number of instances allowed in this node int m_minNumInsts; Node m_parent, m_leftChild, m_rightChild; public Node(InstanceList ilist, Node parent, int minNumInsts) { this(ilist, parent, minNumInsts, null); } public Node(InstanceList ilist, Node parent, int minNumInsts, int[] instIndices) { if (instIndices == null) { instIndices = new int[ilist.size()]; for (int ii = 0; ii < instIndices.length; ii++) instIndices[ii] = ii; } m_gainRatio = GainRatio.createGainRatio(ilist, instIndices, minNumInsts); m_ilist = ilist; m_instIndices = instIndices; m_dataDict = m_ilist.getDataAlphabet(); m_minNumInsts = minNumInsts; m_parent = parent; m_leftChild = m_rightChild = null; } /** The root has depth zero. */ public int depth () { int depth = 0; Node p = m_parent; while (p != null) { p = p.m_parent; depth++; } return depth; } public int getSize() { return m_instIndices.length; } public boolean isLeaf() { return (m_leftChild == null && m_rightChild == null); } public boolean isRoot() { return m_parent == null; } public Node getParent() { return m_parent; } public Node getLeftChild() { return m_leftChild; } public Node getRightChild() { return m_rightChild; } public GainRatio getGainRatio() { return m_gainRatio; } public Object getSplitFeature() { return m_dataDict.lookupObject(m_gainRatio.getMaxValuedIndex()); } public InstanceList getInstances() { InstanceList ret = new InstanceList(m_ilist.getPipe()); for (int ii = 0; ii < m_instIndices.length; ii++) ret.add(m_ilist.get(m_instIndices[ii])); return ret; } /** * Count the number of non-leaf descendant nodes */ public int getNumDescendants() { if (isLeaf()) return 0; int count = 0; if (! getLeftChild().isLeaf()) count += 1 + getLeftChild().getNumDescendants(); if (! getRightChild().isLeaf()) count += 1 + getRightChild().getNumDescendants(); return count; } public void split() { if (m_ilist == null) throw new IllegalStateException ("Frozen. Cannot split."); int numLeftChildren = 0; boolean[] toLeftChild = new boolean[m_instIndices.length]; for (int i = 0; i < m_instIndices.length; i++) { Instance instance = m_ilist.get(m_instIndices[i]); FeatureVector fv = (FeatureVector) instance.getData(); if (fv.value (m_gainRatio.getMaxValuedIndex()) <= m_gainRatio.getMaxValuedThreshold()) { toLeftChild[i] = true; numLeftChildren++; } else toLeftChild[i] = false; } logger.info("leftChild.size=" + numLeftChildren + " rightChild.size=" + (m_instIndices.length-numLeftChildren)); int[] leftIndices = new int[numLeftChildren]; int[] rightIndices = new int[m_instIndices.length - numLeftChildren]; int li = 0, ri = 0; for (int i = 0; i < m_instIndices.length; i++) { if (toLeftChild[i]) leftIndices[li++] = m_instIndices[i]; else rightIndices[ri++] = m_instIndices[i]; } m_leftChild = new Node(m_ilist, this, m_minNumInsts, leftIndices); m_rightChild = new Node(m_ilist, this, m_minNumInsts, rightIndices); } public double computeCostAndPrune() { double costS = getMDL(); if (isLeaf()) return costS + 1; double minCost1 = getLeftChild().computeCostAndPrune(); double minCost2 = getRightChild().computeCostAndPrune(); double costSplit = Math.log(m_gainRatio.getNumSplitPointsForBestFeature()) / GainRatio.log2; double minCostN = Math.min(costS+1, costSplit+1+minCost1+minCost2); if (Maths.almostEquals(minCostN, costS+1)) m_leftChild = m_rightChild = null; return minCostN; } /** * Calculates the minimum description length of this node, i.e., * the length of the binary encoding that describes the feature * and the split value used at this node */ public double getMDL() { int numClasses = m_ilist.getTargetAlphabet().size(); double mdl = getSize() * getGainRatio().getBaseEntropy(); mdl += ((numClasses-1) * Math.log(getSize() / 2.0)) / (2 * GainRatio.log2); double piPow = Math.pow(Math.PI, numClasses/2.0); double gammaVal = Maths.gamma(numClasses/2.0); mdl += Math.log(piPow/gammaVal) / GainRatio.log2; return mdl; } /** * Saves memory by allowing ilist to be garbage collected * (deletes this node's associated instance list) */ public void stopGrowth () { if (m_leftChild != null) m_leftChild.stopGrowth(); if (m_rightChild != null) m_rightChild.stopGrowth(); m_ilist = null; } public String getName() { return getStringBufferName().toString(); } public StringBuffer getStringBufferName() { StringBuffer sb = new StringBuffer(); if (m_parent == null) return sb.append("root"); else if (m_parent.getParent() == null) { sb.append("(\""); sb.append(m_dataDict.lookupObject(m_parent.getGainRatio().getMaxValuedIndex()).toString()); sb.append("\""); if (m_parent.getLeftChild() == this) sb.append(" <= "); else sb.append(" > "); sb.append(m_parent.getGainRatio().getMaxValuedThreshold()); return sb.append(")"); } else { sb.append(m_parent.getStringBufferName()); sb.append(" && (\""); sb.append(m_dataDict.lookupObject(m_parent.getGainRatio().getMaxValuedIndex()).toString()); sb.append("\""); if (m_parent.getLeftChild() == this) sb.append(" <= "); else sb.append(" > "); sb.append(m_parent.getGainRatio().getMaxValuedThreshold()); return sb.append(")"); } } /** * Prints the tree rooted at this node */ public void print() { print(""); } public void print(String prefix) { if (isLeaf()) { int bestLabelIndex = getGainRatio().getBaseLabelDistribution().getBestIndex(); int numMajorityLabel = (int) (getGainRatio().getBaseLabelDistribution().value(bestLabelIndex) * getSize()); System.out.println("root:" + getGainRatio().getBaseLabelDistribution().getBestLabel() + " " + numMajorityLabel + "/" + getSize()); } else { String featName = m_dataDict.lookupObject(getGainRatio().getMaxValuedIndex()).toString(); double threshold = getGainRatio().getMaxValuedThreshold(); System.out.print(prefix + "\"" + featName + "\" <= " + threshold + ":"); if (m_leftChild.isLeaf()) { int bestLabelIndex = m_leftChild.getGainRatio().getBaseLabelDistribution().getBestIndex(); int numMajorityLabel = (int) (m_leftChild.getGainRatio().getBaseLabelDistribution().value(bestLabelIndex) * m_leftChild.getSize()); System.out.println(m_leftChild.getGainRatio().getBaseLabelDistribution().getBestLabel() + " " + numMajorityLabel + "/" + m_leftChild.getSize()); } else { System.out.println(); m_leftChild.print(prefix + "| "); } System.out.print(prefix + "\"" + featName + "\" > " + threshold + ":"); if (m_rightChild.isLeaf()) { int bestLabelIndex = m_rightChild.getGainRatio().getBaseLabelDistribution().getBestIndex(); int numMajorityLabel = (int) (m_rightChild.getGainRatio().getBaseLabelDistribution().value(bestLabelIndex) * m_rightChild.getSize()); System.out.println(m_rightChild.getGainRatio().getBaseLabelDistribution().getBestLabel() + " " + numMajorityLabel + "/" + m_rightChild.getSize()); } else { System.out.println(); m_rightChild.print(prefix + "| "); } } } } // Serialization // serialVersionUID is overriden to prevent innocuous changes in this // class from making the serialization mechanism think the external // format has changed. private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject(ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); out.writeObject(getInstancePipe()); out.writeObject(m_root); } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt(); if (version != CURRENT_SERIAL_VERSION) throw new ClassNotFoundException("Mismatched C45 versions: wanted " + CURRENT_SERIAL_VERSION + ", got " + version); instancePipe = (Pipe) in.readObject(); m_root = (Node) in.readObject(); } }
11,186
29.649315
151
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/MCMaxEntTrainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.util.logging.*; import java.util.*; import java.io.*; import cc.mallet.classify.Classifier; import cc.mallet.optimize.LimitedMemoryBFGS; import cc.mallet.optimize.Optimizable; import cc.mallet.optimize.Optimizer; import cc.mallet.optimize.tests.*; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.ExpGain; import cc.mallet.types.FeatureInducer; import cc.mallet.types.FeatureSelection; import cc.mallet.types.FeatureVector; import cc.mallet.types.GradientGain; import cc.mallet.types.InfoGain; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.Label; import cc.mallet.types.LabelAlphabet; import cc.mallet.types.LabelVector; import cc.mallet.types.Labeling; import cc.mallet.types.MatrixOps; import cc.mallet.types.RankedFeatureVector; import cc.mallet.types.Vector; import cc.mallet.util.CommandOption; import cc.mallet.util.MalletLogger; import cc.mallet.util.MalletProgressMessageLogger; import cc.mallet.util.Maths; // Does not currently handle instances that are labeled with distributions // instead of a single label. /** * The trainer for a Maximum Entropy classifier. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class MCMaxEntTrainer extends ClassifierTrainer<MCMaxEnt> implements Boostable, Serializable //implements CommandOption.ListProviding { private static Logger logger = MalletLogger.getLogger(MCMaxEntTrainer.class.getName()); private static Logger progressLogger = MalletProgressMessageLogger.getLogger(MCMaxEntTrainer.class.getName()+"-pl"); int numGetValueCalls = 0; int numGetValueGradientCalls = 0; int numIterations = 10; public static final String EXP_GAIN = "exp"; public static final String GRADIENT_GAIN = "grad"; public static final String INFORMATION_GAIN = "info"; // xxx Why does TestMaximizable fail when this variance is very small? static final double DEFAULT_GAUSSIAN_PRIOR_VARIANCE = .1; // note used to be 1 static final double DEFAULT_HYPERBOLIC_PRIOR_SLOPE = 0.2; static final double DEFAULT_HYPERBOLIC_PRIOR_SHARPNESS = 10.0; static final Class DEFAULT_MAXIMIZER_CLASS = LimitedMemoryBFGS.class; // CPAL boolean usingMultiConditionalTraining = true; boolean usingHyperbolicPrior = false; double gaussianPriorVariance = DEFAULT_GAUSSIAN_PRIOR_VARIANCE; double hyperbolicPriorSlope = DEFAULT_HYPERBOLIC_PRIOR_SLOPE; double hyperbolicPriorSharpness = DEFAULT_HYPERBOLIC_PRIOR_SHARPNESS; Class maximizerClass = DEFAULT_MAXIMIZER_CLASS; double generativeWeighting = 1.0; MaximizableTrainer mt; MCMaxEnt initialClassifier; // CPAL static CommandOption.Boolean usingMultiConditionalTrainingOption = new CommandOption.Boolean (MCMaxEntTrainer.class, "useMCTraining", "true|false", true, true, "Use MultiConditional Training", null); static CommandOption.Boolean usingHyperbolicPriorOption = new CommandOption.Boolean (MCMaxEntTrainer.class, "useHyperbolicPrior", "true|false", false, false, "Use hyperbolic (close to L1 penalty) prior over parameters", null); static CommandOption.Double gaussianPriorVarianceOption = new CommandOption.Double (MCMaxEntTrainer.class, "gaussianPriorVariance", "FLOAT", true, 10.0, "Variance of the gaussian prior over parameters", null); static CommandOption.Double hyperbolicPriorSlopeOption = new CommandOption.Double (MCMaxEntTrainer.class, "hyperbolicPriorSlope", "FLOAT", true, 0.2, "Slope of the (L1 penalty) hyperbolic prior over parameters", null); static CommandOption.Double hyperbolicPriorSharpnessOption = new CommandOption.Double (MCMaxEntTrainer.class, "hyperbolicPriorSharpness", "FLOAT", true, 10.0, "Sharpness of the (L1 penalty) hyperbolic prior over parameters", null); static final CommandOption.List commandOptions = new CommandOption.List ( "MCMaximum Entropy Classifier", new CommandOption[] { usingHyperbolicPriorOption, gaussianPriorVarianceOption, hyperbolicPriorSlopeOption, hyperbolicPriorSharpnessOption, usingMultiConditionalTrainingOption, // CPAL }); public static CommandOption.List getCommandOptionList () { return commandOptions; } /* public MCMaxEntTrainer(Maximizer.ByGradient maximizer) { this.maximizerByGradient = maximizer; this.usingHyperbolicPrior = false; } */ public MCMaxEntTrainer (CommandOption.List col) { this.usingHyperbolicPrior = usingHyperbolicPriorOption.value; this.gaussianPriorVariance = gaussianPriorVarianceOption.value; this.hyperbolicPriorSlope = hyperbolicPriorSlopeOption.value; this.hyperbolicPriorSharpness = hyperbolicPriorSharpnessOption.value; this.usingMultiConditionalTraining = usingMultiConditionalTrainingOption.value; } public MCMaxEntTrainer (MCMaxEnt initialClassifier) { this.initialClassifier = initialClassifier; } public MCMaxEntTrainer () { this (false); } public MCMaxEntTrainer (boolean useHyperbolicPrior) { this.usingHyperbolicPrior = useHyperbolicPrior; } /** Constructs a trainer with a parameter to avoid overtraining. 1.0 is * usually a reasonable default value. */ public MCMaxEntTrainer (double gaussianPriorVariance) { this.usingHyperbolicPrior = false; this.gaussianPriorVariance = gaussianPriorVariance; } // CPAL - added this to do MultiConditionalTraining public MCMaxEntTrainer (double gaussianPriorVariance, boolean useMultiConditionalTraining ) { this.usingHyperbolicPrior = false; this.usingMultiConditionalTraining = useMultiConditionalTraining; this.gaussianPriorVariance = gaussianPriorVariance; } public MCMaxEntTrainer (double hyperbolicPriorSlope, double hyperbolicPriorSharpness) { this.usingHyperbolicPrior = true; this.hyperbolicPriorSlope = hyperbolicPriorSlope; this.hyperbolicPriorSharpness = hyperbolicPriorSharpness; } public Optimizable.ByGradientValue getMaximizableTrainer (InstanceList ilist) { if (ilist == null) return new MaximizableTrainer (); return new MaximizableTrainer (ilist, null); } /** * Specifies the maximum number of iterations to run during a single call * to <code>train</code> or <code>trainWithFeatureInduction</code>. Not * currently functional. * @return This trainer */ // XXX Since we maximize before using numIterations, this doesn't work. // Is that a bug? If so, should the default numIterations be higher? public MCMaxEntTrainer setNumIterations (int i) { numIterations = i; return this; } public MCMaxEntTrainer setUseHyperbolicPrior (boolean useHyperbolicPrior) { this.usingHyperbolicPrior = useHyperbolicPrior; return this; } /** * Sets a parameter to prevent overtraining. A smaller variance for the prior * means that feature weights are expected to hover closer to 0, so extra * evidence is required to set a higher weight. * @return This trainer */ public MCMaxEntTrainer setGaussianPriorVariance (double gaussianPriorVariance) { this.usingHyperbolicPrior = false; this.gaussianPriorVariance = gaussianPriorVariance; return this; } public MCMaxEntTrainer setHyperbolicPriorSlope(double hyperbolicPriorSlope) { this.usingHyperbolicPrior = true; this.hyperbolicPriorSlope = hyperbolicPriorSlope; return this; } public MCMaxEntTrainer setHyperbolicPriorSharpness (double hyperbolicPriorSharpness) { this.usingHyperbolicPrior = true; this.hyperbolicPriorSharpness = hyperbolicPriorSharpness; return this; } public MCMaxEnt getClassifier () { return mt.getClassifier(); } public MCMaxEnt train (InstanceList trainingSet) { logger.fine ("trainingSet.size() = "+trainingSet.size()); mt = new MaximizableTrainer (trainingSet, (MCMaxEnt)initialClassifier); Optimizer maximizer = new LimitedMemoryBFGS(mt); // CPAL - change the tolerance for large vocab experiments ((LimitedMemoryBFGS)maximizer).setTolerance(.00001); // std is .0001; maximizer.optimize (); // XXX given the loop below, this seems wrong. logger.info("MCMaxEnt ngetValueCalls:"+getValueCalls()+"\nMCMaxEnt ngetValueGradientCalls:"+getValueGradientCalls()); // boolean converged; // // for (int i = 0; i < numIterations; i++) { // converged = maximizer.maximize (mt, 1); // if (converged) // break; // else if (evaluator != null) // if (!evaluator.evaluate (mt.getClassifier(), converged, i, mt.getValue(), // trainingSet, validationSet, testSet)) // break; // } // TestMaximizable.testValueAndGradient (mt); progressLogger.info("\n"); // progess messages are on one line; move on. return mt.getClassifier (); } /** * <p>Like the other version of <code>trainWithFeatureInduction</code>, but * allows some default options to be changed.</p> * * @param maxent An initial partially-trained classifier (default <code>null</code>). * This classifier may be modified during training. * @param gainName The estimate of gain (log-likelihood increase) we want our chosen * features to maximize. * Should be one of <code>MaxEntTrainer.EXP_GAIN</code>, * <code>MaxEntTrainer.GRADIENT_GAIN</code>, or * <code>MaxEntTrainer.INFORMATION_GAIN</code> (default <code>EXP_GAIN</code>). * * @return The trained <code>MaxEnt</code> classifier */ /* public Classifier trainWithFeatureInduction (InstanceList trainingData, InstanceList validationData, InstanceList testingData, ClassifierEvaluating evaluator, MCMaxEnt maxent, int totalIterations, int numIterationsBetweenFeatureInductions, int numFeatureInductions, int numFeaturesPerFeatureInduction, String gainName) { // XXX This ought to be a parameter, except that setting it to true can // crash training ("Jump too small"). boolean saveParametersDuringFI = false; Alphabet inputAlphabet = trainingData.getDataAlphabet(); Alphabet outputAlphabet = trainingData.getTargetAlphabet(); if (maxent == null) maxent = new MCMaxEnt(trainingData.getPipe(), new double[(1+inputAlphabet.size()) * outputAlphabet.size()]); int trainingIteration = 0; int numLabels = outputAlphabet.size(); // Initialize feature selection FeatureSelection globalFS = trainingData.getFeatureSelection(); if (globalFS == null) { // Mask out all features; some will be added later by FeatureInducer.induceFeaturesFor(.) globalFS = new FeatureSelection (trainingData.getDataAlphabet()); trainingData.setFeatureSelection (globalFS); } if (validationData != null) validationData.setFeatureSelection (globalFS); if (testingData != null) testingData.setFeatureSelection (globalFS); maxent = new MCMaxEnt(maxent.getInstancePipe(), maxent.getParameters(), globalFS); // Run feature induction for (int featureInductionIteration = 0; featureInductionIteration < numFeatureInductions; featureInductionIteration++) { // Print out some feature information logger.info ("Feature induction iteration "+featureInductionIteration); // Train the model a little bit. We don't care whether it converges; we // execute all feature induction iterations no matter what. if (featureInductionIteration != 0) { // Don't train until we have added some features setNumIterations(numIterationsBetweenFeatureInductions); maxent = (MCMaxEnt)this.train (trainingData, validationData, testingData, evaluator, maxent); } trainingIteration += numIterationsBetweenFeatureInductions; logger.info ("Starting feature induction with "+(1+inputAlphabet.size())+ " features over "+numLabels+" labels."); // Create the list of error tokens InstanceList errorInstances = new InstanceList (trainingData.getDataAlphabet(), trainingData.getTargetAlphabet()); // This errorInstances.featureSelection will get examined by FeatureInducer, // so it can know how to add "new" singleton features errorInstances.setFeatureSelection (globalFS); List errorLabelVectors = new ArrayList(); // these are length-1 vectors for (int i = 0; i < trainingData.size(); i++) { Instance instance = trainingData.get(i); FeatureVector inputVector = (FeatureVector) instance.getData(); Label trueLabel = (Label) instance.getTarget(); // Having trained using just the current features, see how we classify // the training data now. Classification classification = maxent.classify(instance); if (!classification.bestLabelIsCorrect()) { errorInstances.add(inputVector, trueLabel, null, null); errorLabelVectors.add(classification.getLabelVector()); } } logger.info ("Error instance list size = "+errorInstances.size()); int s = errorLabelVectors.size(); LabelVector[] lvs = new LabelVector[s]; for (int i = 0; i < s; i++) { lvs[i] = (LabelVector)errorLabelVectors.get(i); } RankedFeatureVector.Factory gainFactory = null; if (gainName.equals (EXP_GAIN)) gainFactory = new ExpGain.Factory (lvs, gaussianPriorVariance); else if (gainName.equals(GRADIENT_GAIN)) gainFactory = new GradientGain.Factory (lvs); else if (gainName.equals(INFORMATION_GAIN)) gainFactory = new InfoGain.Factory (); else throw new IllegalArgumentException("Unsupported gain name: "+gainName); FeatureInducer klfi = new FeatureInducer (gainFactory, errorInstances, numFeaturesPerFeatureInduction, 2*numFeaturesPerFeatureInduction, 2*numFeaturesPerFeatureInduction); // Note that this adds features globally, but not on a per-transition basis klfi.induceFeaturesFor (trainingData, false, false); if (testingData != null) klfi.induceFeaturesFor (testingData, false, false); logger.info ("MCMaxEnt FeatureSelection now includes "+globalFS.cardinality()+" features"); klfi = null; double[] newParameters = new double[(1+inputAlphabet.size()) * outputAlphabet.size()]; // XXX (Executing this block often causes an error during training; I don't know why.) if (saveParametersDuringFI) { // Keep current parameter values // XXX This relies on the implementation detail that the most recent features // added to an Alphabet get the highest indices. // Count parameters per output label int oldParamCount = maxent.parameters.length / outputAlphabet.size(); int newParamCount = 1+inputAlphabet.size(); // Copy params into the proper locations for (int i=0; i<outputAlphabet.size(); i++) { System.arraycopy(maxent.parameters, i*oldParamCount, newParameters, i*newParamCount, oldParamCount); } for (int i=0; i<oldParamCount; i++) if (maxent.parameters[i] != newParameters[i]) { System.out.println(maxent.parameters[i]+" "+newParameters[i]); System.exit(0); } } maxent.parameters = newParameters; maxent.defaultFeatureIndex = inputAlphabet.size(); } // Finished feature induction logger.info("Ended with "+globalFS.cardinality()+" features."); setNumIterations(totalIterations - trainingIteration); return this.train (trainingData, validationData, testingData, evaluator, maxent); } */ // XXX Should these really be public? Why? /** Counts how many times this trainer has computed the gradient of the * log probability of training labels. */ public int getValueGradientCalls() {return numGetValueGradientCalls;} /** Counts how many times this trainer has computed the * log probability of training labels. */ public int getValueCalls() {return numGetValueCalls;} // public int getIterations() {return maximizerByGradient.getIterations();} public String toString() { return "MCMaxEntTrainer" // + "("+maximizerClass.getName()+") " + ",numIterations=" + numIterations + (usingHyperbolicPrior ? (",hyperbolicPriorSlope="+hyperbolicPriorSlope+ ",hyperbolicPriorSharpness="+hyperbolicPriorSharpness) : (",gaussianPriorVariance="+gaussianPriorVariance)); } // A private inner class that wraps up a MCMaxEnt classifier and its training data. // The result is a maximize.Maximizable function. private class MaximizableTrainer implements Optimizable.ByGradientValue { double[] parameters, constraints, cachedGradient; MCMaxEnt theClassifier; InstanceList trainingList; // The expectations are (temporarily) stored in the cachedGradient double cachedValue; boolean cachedValueStale; boolean cachedGradientStale; int numLabels; int numFeatures; int defaultFeatureIndex; // just for clarity FeatureSelection featureSelection; FeatureSelection[] perLabelFeatureSelection; public MaximizableTrainer (){} public MaximizableTrainer (InstanceList ilist, MCMaxEnt initialClassifier) { this.trainingList = ilist; Alphabet fd = ilist.getDataAlphabet(); LabelAlphabet ld = (LabelAlphabet) ilist.getTargetAlphabet(); // Don't fd.stopGrowth, because someone might want to do feature induction ld.stopGrowth(); // Add one feature for the "default feature". this.numLabels = ld.size(); this.numFeatures = fd.size() + 1; this.defaultFeatureIndex = numFeatures-1; this.parameters = new double [numLabels * numFeatures]; this.constraints = new double [numLabels * numFeatures]; this.cachedGradient = new double [numLabels * numFeatures]; Arrays.fill (parameters, 0.0); Arrays.fill (constraints, 0.0); Arrays.fill (cachedGradient, 0.0); this.featureSelection = ilist.getFeatureSelection(); this.perLabelFeatureSelection = ilist.getPerLabelFeatureSelection(); // Add the default feature index to the selection if (featureSelection != null) featureSelection.add (defaultFeatureIndex); if (perLabelFeatureSelection != null) for (int i = 0; i < perLabelFeatureSelection.length; i++) perLabelFeatureSelection[i].add (defaultFeatureIndex); // xxx Later change this to allow both to be set, but select which one to use by a boolean flag? assert (featureSelection == null || perLabelFeatureSelection == null); if (initialClassifier != null) { this.theClassifier = initialClassifier; this.parameters = theClassifier.parameters; this.featureSelection = theClassifier.featureSelection; this.perLabelFeatureSelection = theClassifier.perClassFeatureSelection; this.defaultFeatureIndex = theClassifier.defaultFeatureIndex; assert (initialClassifier.getInstancePipe() == ilist.getPipe()); } else if (this.theClassifier == null) { this.theClassifier = new MCMaxEnt (ilist.getPipe(), parameters, featureSelection, perLabelFeatureSelection); } cachedValueStale = true; cachedGradientStale = true; // Initialize the constraints logger.fine("Number of instances in training list = " + trainingList.size()); for (Instance inst : trainingList) { double instanceWeight = trainingList.getInstanceWeight(inst); Labeling labeling = inst.getLabeling (); //logger.fine ("Instance "+ii+" labeling="+labeling); FeatureVector fv = (FeatureVector) inst.getData (); Alphabet fdict = fv.getAlphabet(); assert (fv.getAlphabet() == fd); int li = labeling.getBestIndex(); // The "2*" below is because there is one copy for the p(y|x)and another for the p(x|y). MatrixOps.rowPlusEquals (constraints, numFeatures, li, fv, 2*instanceWeight); // For the default feature, whose weight is 1.0 assert(!Double.isNaN(instanceWeight)) : "instanceWeight is NaN"; assert(!Double.isNaN(li)) : "bestIndex is NaN"; boolean hasNaN = false; for(int i = 0; i < fv.numLocations(); i++) { if(Double.isNaN(fv.valueAtLocation(i))) { logger.info("NaN for feature " + fdict.lookupObject(fv.indexAtLocation(i)).toString()); hasNaN = true; } } if(hasNaN) logger.info("NaN in instance: " + inst.getName()); // Only p(y|x) uses the default feature; p(x|y) doesn't use it. The default feature value is 1.0. constraints[li*numFeatures + defaultFeatureIndex] += instanceWeight; } //TestMaximizable.testValueAndGradientCurrentParameters (this); } public MCMaxEnt getClassifier () { return theClassifier; } public double getParameter (int index) { return parameters[index]; } public void setParameter (int index, double v) { cachedValueStale = true; cachedGradientStale = true; parameters[index] = v; } public int getNumParameters() { return parameters.length; } public void getParameters (double[] buff) { if (buff == null || buff.length != parameters.length) buff = new double [parameters.length]; System.arraycopy (parameters, 0, buff, 0, parameters.length); } public void setParameters (double [] buff) { assert (buff != null); cachedValueStale = true; cachedGradientStale = true; if (buff.length != parameters.length) parameters = new double[buff.length]; System.arraycopy (buff, 0, parameters, 0, buff.length); } // log probability of the training labels public double getValue () { if (cachedValueStale) { numGetValueCalls++; cachedValue = 0; // We'll store the expectation values in "cachedGradient" for now cachedGradientStale = true; java.util.Arrays.fill (cachedGradient, 0.0); // Incorporate likelihood of data double[] scores = new double[trainingList.getTargetAlphabet().size()]; double value = 0.0; //System.out.println("I Now "+inputAlphabet.size()+" regular features."); Iterator<Instance> iter = trainingList.iterator(); //int ii = 0; // Normalize the parameters to be per-class multinomials double probs[][] = new double[scores.length][numFeatures]; double lprobs[][] = new double[scores.length][numFeatures]; for (int si = 0; si < scores.length; si++) { double sum = 0, max = MatrixOps.max (parameters); for (int fi = 0; fi < numFeatures; fi++) { // TODO Strongly consider some smoothing here. What happens when all parameters are zero? // Oh, this should be no problem, because exp(0) == 1. probs[si][fi] = Math.exp(parameters[si*numFeatures+fi] - max); sum += probs[si][fi]; } assert (sum > 0); for (int fi = 0; fi < numFeatures; fi++) { probs[si][fi] /= sum; lprobs[si][fi] = Math.log(probs[si][fi]); } } while (iter.hasNext()) { Instance instance = iter.next(); double instanceWeight = trainingList.getInstanceWeight(instance); Labeling labeling = instance.getLabeling (); //System.out.println("L Now "+inputAlphabet.size()+" regular features."); this.theClassifier.getClassificationScores (instance, scores); FeatureVector fv = (FeatureVector) instance.getData (); int li = labeling.getBestIndex(); value = - (instanceWeight * Math.log (scores[li])); if(Double.isNaN(value)) { logger.fine ("MCMaxEntTrainer: Instance " + instance.getName() + "has NaN value. log(scores)= " + Math.log(scores[li]) + " scores = " + scores[li] + " has instance weight = " + instanceWeight); } if (Double.isInfinite(value)) { logger.warning ("Instance "+instance.getSource() + " has infinite value; skipping value and gradient"); cachedValue -= value; cachedValueStale = false; return -value; // continue; } cachedValue += value; // CPAL - this is a loop over classes and their scores // - we compute the gradient by taking the dot product of the feature value // and the probability of the class for (int si = 0; si < scores.length; si++) { if (scores[si] == 0) continue; assert (!Double.isInfinite(scores[si])); // CPAL - accumulating the current classifiers expectation of the feature // vector counts for this class label // Current classifier has expectation over class label, not over feature vector MatrixOps.rowPlusEquals (cachedGradient, numFeatures, si, fv, -instanceWeight * scores[si]); cachedGradient[numFeatures*si + defaultFeatureIndex] += (-instanceWeight * scores[si]); } // CPAL - if we wish to do multiconditional training we need another term for this accumulated // expectation if (usingMultiConditionalTraining) { // need something analogous to this // this.theClassifier.getClassificationScores (instance, scores); // this.theClassifier.getFeatureDistributions (instance, // Note: li is the "label" for this instance // Get the sum of the feature vector // which is the number of counts for the document if we use that as input double Ncounts = MatrixOps.sum(fv); // CPAL - get the additional term for the value of our - log probability // - this computation amounts to the dot product of the feature vector and the probability vector cachedValue -= (instanceWeight * fv.dotProduct(lprobs[li])); // CPAL - get the model expectation over features for the given class for (int fi = 0; fi < numFeatures; fi++) { //if(parameters[numFeatures*li + fi] != 0) { // MatrixOps.rowPlusEquals(cachedGradient, numFeatures,li,fv,)) cachedGradient[numFeatures*li + fi] += (-instanceWeight * Ncounts * probs[li][fi]); // } } } } //logger.info ("-Expectations:"); cachedGradient.print(); // Incorporate prior on parameters if (usingHyperbolicPrior) { for (int li = 0; li < numLabels; li++) for (int fi = 0; fi < numFeatures; fi++) cachedValue += (hyperbolicPriorSlope / hyperbolicPriorSharpness * Math.log (Maths.cosh (hyperbolicPriorSharpness * parameters[li *numFeatures + fi]))); } else { for (int li = 0; li < numLabels; li++) for (int fi = 0; fi < numFeatures; fi++) { double param = parameters[li*numFeatures + fi]; cachedValue += param * param / (2 * gaussianPriorVariance); } } cachedValue *= -1.0; // MAXIMIZE, NOT MINIMIZE cachedValueStale = false; progressLogger.info ("Value (loglikelihood) = "+cachedValue); } return cachedValue; } // CPAL first get value, then gradient public void getValueGradient (double [] buffer) { // Gradient is (constraint - expectation - parameters/gaussianPriorVariance) if (cachedGradientStale) { numGetValueGradientCalls++; if (cachedValueStale) // This will fill in the cachedGradient with the "-expectation" getValue (); // cachedGradient contains the negative expectations // expectations are model expectations and constraints are // empirical expectations MatrixOps.plusEquals (cachedGradient, constraints); // CPAL - we need a second copy of the constraints // - actually, we only want this for the feature values // - I've moved this up into getValue //if (usingMultiConditionalTraining){ // MatrixOps.plusEquals(cachedGradient, constraints); //} // Incorporate prior on parameters if (usingHyperbolicPrior) { throw new UnsupportedOperationException ("Hyperbolic prior not yet implemented."); } else { MatrixOps.plusEquals (cachedGradient, parameters, -1.0 / gaussianPriorVariance); } // A parameter may be set to -infinity by an external user. // We set gradient to 0 because the parameter's value can // never change anyway and it will mess up future calculations // on the matrix, such as norm(). MatrixOps.substitute (cachedGradient, Double.NEGATIVE_INFINITY, 0.0); // Set to zero all the gradient dimensions that are not among the selected features if (perLabelFeatureSelection == null) { for (int labelIndex = 0; labelIndex < numLabels; labelIndex++) MatrixOps.rowSetAll (cachedGradient, numFeatures, labelIndex, 0.0, featureSelection, false); } else { for (int labelIndex = 0; labelIndex < numLabels; labelIndex++) MatrixOps.rowSetAll (cachedGradient, numFeatures, labelIndex, 0.0, perLabelFeatureSelection[labelIndex], false); } cachedGradientStale = false; } assert (buffer != null && buffer.length == parameters.length); System.arraycopy (cachedGradient, 0, buffer, 0, cachedGradient.length); } public double sumNegLogProb (double a, double b) { if (a == Double.POSITIVE_INFINITY && b == Double.POSITIVE_INFINITY) return Double.POSITIVE_INFINITY; else if (a > b) return b - Math.log (1 + Math.exp(b-a)); else return a - Math.log (1 + Math.exp(a-b)); } } }
29,829
38.720373
140
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/ClassifierTrainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.io.*; import java.util.*; import cc.mallet.classify.Classifier; import cc.mallet.fst.TransducerTrainer; import cc.mallet.optimize.Optimizer; import cc.mallet.types.FeatureSelection; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.Labeler; import cc.mallet.util.BshInterpreter; import cc.mallet.util.CommandOption; /** * Abstract parent of all classifier trainers. * <p> * All classification techniques in MALLET are implement as two classes: * a trainer and a classifier. The trainer ingests the training data * and creates a classifier that holds the parameters set during training. * The classifier applies those parameters to an Instance to produce * a classification of the Instance. * <p> * A concrete trainer is required only to be able to train from an InstanceList. * Trainers that can incrementally train are subclasses of IncrementalTrainingClassifier. * <p> * The command line interface tools for document classification are: * {@link cc.mallet.classify.tui.Csv2Vectors}, * {@link cc.mallet.classify.tui.Text2Vectors}, * {@link cc.mallet.classify.tui.Vectors2Classify}, * {@link cc.mallet.classify.tui.Vectors2Info}, and * {@link cc.mallet.classify.tui.Vectors2Vectors} * * @see Classifier * @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ /** Each ClassifierTrainer trains one Classifier based on various interfaces for consuming training data. * If you want an object that can train be asked to train on multiple different training sets and * yield different classifiers, you probably want a ClassifierTrainer.Factory. */ public abstract class ClassifierTrainer<C extends Classifier> { protected InstanceList validationSet; protected boolean finishedTraining = false; public boolean isFinishedTraining() { return finishedTraining; } // Careful to set this properly in subclasses! Consider removing -akm 1/08 public abstract C getClassifier(); public abstract C train (InstanceList trainingSet); public void setValidationInstances (InstanceList validationSet) { this.validationSet = validationSet; } public InstanceList getValidationInstances () { return this.validationSet; } /* No, it is fine if these can be set in the constructor only. * Individual ClassifierTrainer subclasses could provide this interface if desired. public C setInitialClassifier (C initialClassifier) { return null; } public C getInitialClassifier () { return null; } */ public interface ByOptimization<C extends Classifier> { public C train (InstanceList trainingSet, int numIterations); public Optimizer getOptimizer (); public abstract int getIteration(); } /** For active learning, in which this trainer will select certain instances and * request that the Labeler instance label them. * @param trainingAndUnlabeledSet the instances on which to train; some may be labeled; unlabeled ones may have their label requested from the labeler. * @param labeler * @param numLabelRequests the number of times to call labeler.label(). */ public interface ByActiveLearning<C extends Classifier> { public C train (InstanceList trainingAndUnlabeledSet, Labeler labeler, int numLabelRequests); } /** For various kinds of online learning by batches, where training instances are presented, * consumed for learning immediately. The same instances may be presented more than once to * this interface. For example, StochasticGradient, etc conforms to this interface. */ public interface ByIncrements<C extends Classifier> { public C trainIncremental (InstanceList trainingInstancesToAdd); } /** For online learning that can operate on one instance at a time. For example, Perceptron. */ public interface ByInstanceIncrements<C extends Classifier> extends ByIncrements<C> { public C trainIncremental (Instance instanceToAdd); } /** Instances of a Factory know how to create new ClassifierTrainers to apply to new Classifiers. */ public static abstract class Factory<CT extends ClassifierTrainer<? extends Classifier>> { // This is recommended (but cannot be enforced in Java) that subclasses implement // public static Classifier train (InstanceList trainingSet) // public static Classifier train (InstanceList trainingSet, InstanceList validationSet) // public static Classifier train (InstanceList trainingSet, InstanceList validationSet, Classifier initialClassifier) // which call public abstract CT newClassifierTrainer (Classifier initialClassifier); public CT newClassifierTrainer () { return newClassifierTrainer (null); } public String toString() { return this.getClass().getName(); } } }
5,163
43.517241
152
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/MaxEntOptimizableByLabelLikelihood.java
package cc.mallet.classify; import java.io.Serializable; import java.util.Arrays; import java.util.Iterator; import java.util.logging.Logger; import cc.mallet.optimize.LimitedMemoryBFGS; import cc.mallet.optimize.Optimizable; import cc.mallet.types.Alphabet; import cc.mallet.types.FeatureSelection; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.LabelAlphabet; import cc.mallet.types.Labeling; import cc.mallet.types.MatrixOps; import cc.mallet.util.MalletLogger; import cc.mallet.util.MalletProgressMessageLogger; import cc.mallet.util.Maths; public class MaxEntOptimizableByLabelLikelihood implements Optimizable.ByGradientValue { private static Logger logger = MalletLogger.getLogger(MaxEntOptimizableByLabelLikelihood.class.getName()); private static Logger progressLogger = MalletProgressMessageLogger.getLogger(MaxEntOptimizableByLabelLikelihood.class.getName()+"-pl"); // xxx Why does TestMaximizable fail when this variance is very small? static final double DEFAULT_GAUSSIAN_PRIOR_VARIANCE = 1; static final double DEFAULT_HYPERBOLIC_PRIOR_SLOPE = 0.2; static final double DEFAULT_HYPERBOLIC_PRIOR_SHARPNESS = 10.0; static final Class DEFAULT_MAXIMIZER_CLASS = LimitedMemoryBFGS.class; boolean usingHyperbolicPrior = false; boolean usingGaussianPrior = true; double gaussianPriorVariance = DEFAULT_GAUSSIAN_PRIOR_VARIANCE; double hyperbolicPriorSlope = DEFAULT_HYPERBOLIC_PRIOR_SLOPE; double hyperbolicPriorSharpness = DEFAULT_HYPERBOLIC_PRIOR_SHARPNESS; Class maximizerClass = DEFAULT_MAXIMIZER_CLASS; double[] parameters, constraints, cachedGradient; MaxEnt theClassifier; InstanceList trainingList; // The expectations are (temporarily) stored in the cachedGradient double cachedValue; boolean cachedValueStale; boolean cachedGradientStale; int numLabels; int numFeatures; int defaultFeatureIndex; // just for clarity FeatureSelection featureSelection; FeatureSelection[] perLabelFeatureSelection; int numGetValueCalls = 0; int numGetValueGradientCalls = 0; public MaxEntOptimizableByLabelLikelihood() { } public MaxEntOptimizableByLabelLikelihood (InstanceList trainingSet, MaxEnt initialClassifier) { this.trainingList = trainingSet; Alphabet fd = trainingSet.getDataAlphabet(); LabelAlphabet ld = (LabelAlphabet) trainingSet.getTargetAlphabet(); // Don't fd.stopGrowth, because someone might want to do feature induction ld.stopGrowth(); // Add one feature for the "default feature". this.numLabels = ld.size(); this.numFeatures = fd.size() + 1; this.defaultFeatureIndex = numFeatures-1; this.parameters = new double [numLabels * numFeatures]; this.constraints = new double [numLabels * numFeatures]; this.cachedGradient = new double [numLabels * numFeatures]; Arrays.fill (parameters, 0.0); Arrays.fill (constraints, 0.0); Arrays.fill (cachedGradient, 0.0); this.featureSelection = trainingSet.getFeatureSelection(); this.perLabelFeatureSelection = trainingSet.getPerLabelFeatureSelection(); // Add the default feature index to the selection if (featureSelection != null) featureSelection.add (defaultFeatureIndex); if (perLabelFeatureSelection != null) for (int i = 0; i < perLabelFeatureSelection.length; i++) perLabelFeatureSelection[i].add (defaultFeatureIndex); // xxx Later change this to allow both to be set, but select which one to use by a boolean flag? assert (featureSelection == null || perLabelFeatureSelection == null); if (initialClassifier != null) { this.theClassifier = initialClassifier; this.parameters = theClassifier.parameters; this.featureSelection = theClassifier.featureSelection; this.perLabelFeatureSelection = theClassifier.perClassFeatureSelection; this.defaultFeatureIndex = theClassifier.defaultFeatureIndex; assert (initialClassifier.getInstancePipe() == trainingSet.getPipe()); } else if (this.theClassifier == null) { this.theClassifier = new MaxEnt (trainingSet.getPipe(), parameters, featureSelection, perLabelFeatureSelection); } cachedValueStale = true; cachedGradientStale = true; // Initialize the constraints logger.fine("Number of instances in training list = " + trainingList.size()); for (Instance inst : trainingList) { double instanceWeight = trainingList.getInstanceWeight(inst); Labeling labeling = inst.getLabeling (); if (labeling == null) continue; //logger.fine ("Instance "+ii+" labeling="+labeling); FeatureVector fv = (FeatureVector) inst.getData (); Alphabet fdict = fv.getAlphabet(); assert (fv.getAlphabet() == fd); int li = labeling.getBestIndex(); MatrixOps.rowPlusEquals (constraints, numFeatures, li, fv, instanceWeight); // For the default feature, whose weight is 1.0 assert(!Double.isNaN(instanceWeight)) : "instanceWeight is NaN"; assert(!Double.isNaN(li)) : "bestIndex is NaN"; boolean hasNaN = false; for (int i = 0; i < fv.numLocations(); i++) { if(Double.isNaN(fv.valueAtLocation(i))) { logger.info("NaN for feature " + fdict.lookupObject(fv.indexAtLocation(i)).toString()); hasNaN = true; } } if (hasNaN) logger.info("NaN in instance: " + inst.getName()); constraints[li*numFeatures + defaultFeatureIndex] += 1.0 * instanceWeight; } //TestMaximizable.testValueAndGradientCurrentParameters (this); } public MaxEnt getClassifier () { return theClassifier; } public double getParameter (int index) { return parameters[index]; } public void setParameter (int index, double v) { cachedValueStale = true; cachedGradientStale = true; parameters[index] = v; } public int getNumParameters() { return parameters.length; } public void getParameters (double[] buff) { if (buff == null || buff.length != parameters.length) buff = new double [parameters.length]; System.arraycopy (parameters, 0, buff, 0, parameters.length); } public void setParameters (double [] buff) { assert (buff != null); cachedValueStale = true; cachedGradientStale = true; if (buff.length != parameters.length) parameters = new double[buff.length]; System.arraycopy (buff, 0, parameters, 0, buff.length); } // log probability of the training labels public double getValue () { if (cachedValueStale) { numGetValueCalls++; cachedValue = 0; // We'll store the expectation values in "cachedGradient" for now cachedGradientStale = true; MatrixOps.setAll (cachedGradient, 0.0); // Incorporate likelihood of data double[] scores = new double[trainingList.getTargetAlphabet().size()]; double value = 0.0; Iterator<Instance> iter = trainingList.iterator(); int ii=0; while (iter.hasNext()) { ii++; Instance instance = iter.next(); double instanceWeight = trainingList.getInstanceWeight(instance); Labeling labeling = instance.getLabeling (); if (labeling == null) continue; //System.out.println("L Now "+inputAlphabet.size()+" regular features."); this.theClassifier.getClassificationScores (instance, scores); FeatureVector fv = (FeatureVector) instance.getData (); int li = labeling.getBestIndex(); value = - (instanceWeight * Math.log (scores[li])); if(Double.isNaN(value)) { logger.fine ("MaxEntTrainer: Instance " + instance.getName() + "has NaN value. log(scores)= " + Math.log(scores[li]) + " scores = " + scores[li] + " has instance weight = " + instanceWeight); } if (Double.isInfinite(value)) { logger.warning ("Instance "+instance.getSource() + " has infinite value; skipping value and gradient"); cachedValue -= value; cachedValueStale = false; return -value; // continue; } cachedValue += value; for (int si = 0; si < scores.length; si++) { if (scores[si] == 0) continue; assert (!Double.isInfinite(scores[si])); MatrixOps.rowPlusEquals (cachedGradient, numFeatures, si, fv, -instanceWeight * scores[si]); cachedGradient[numFeatures*si + defaultFeatureIndex] += (-instanceWeight * scores[si]); } } //logger.info ("-Expectations:"); cachedGradient.print(); // Incorporate prior on parameters double prior = 0; if (usingHyperbolicPrior) { for (int li = 0; li < numLabels; li++) for (int fi = 0; fi < numFeatures; fi++) prior += (hyperbolicPriorSlope / hyperbolicPriorSharpness * Math.log (Maths.cosh (hyperbolicPriorSharpness * parameters[li *numFeatures + fi]))); } else if (usingGaussianPrior) { for (int li = 0; li < numLabels; li++) for (int fi = 0; fi < numFeatures; fi++) { double param = parameters[li*numFeatures + fi]; prior += param * param / (2 * gaussianPriorVariance); } } double oValue = cachedValue; cachedValue += prior; cachedValue *= -1.0; // MAXIMIZE, NOT MINIMIZE cachedValueStale = false; progressLogger.info ("Value (labelProb="+oValue+" prior="+prior+") loglikelihood = "+cachedValue); } return cachedValue; } public void getValueGradient (double [] buffer) { // Gradient is (constraint - expectation - parameters/gaussianPriorVariance) if (cachedGradientStale) { numGetValueGradientCalls++; if (cachedValueStale) // This will fill in the cachedGradient with the "-expectation" getValue (); MatrixOps.plusEquals (cachedGradient, constraints); // Incorporate prior on parameters if (usingHyperbolicPrior) { throw new UnsupportedOperationException ("Hyperbolic prior not yet implemented."); } else if (usingGaussianPrior) { MatrixOps.plusEquals (cachedGradient, parameters, -1.0 / gaussianPriorVariance); } // A parameter may be set to -infinity by an external user. // We set gradient to 0 because the parameter's value can // never change anyway and it will mess up future calculations // on the matrix, such as norm(). MatrixOps.substitute (cachedGradient, Double.NEGATIVE_INFINITY, 0.0); // Set to zero all the gradient dimensions that are not among the selected features if (perLabelFeatureSelection == null) { for (int labelIndex = 0; labelIndex < numLabels; labelIndex++) MatrixOps.rowSetAll (cachedGradient, numFeatures, labelIndex, 0.0, featureSelection, false); } else { for (int labelIndex = 0; labelIndex < numLabels; labelIndex++) MatrixOps.rowSetAll (cachedGradient, numFeatures, labelIndex, 0.0, perLabelFeatureSelection[labelIndex], false); } cachedGradientStale = false; } assert (buffer != null && buffer.length == parameters.length); System.arraycopy (cachedGradient, 0, buffer, 0, cachedGradient.length); //System.out.println ("MaxEntTrainer gradient infinity norm = "+MatrixOps.infinityNorm(cachedGradient)); } // XXX Should these really be public? Why? /** Counts how many times this trainer has computed the gradient of the * log probability of training labels. */ public int getValueGradientCalls() {return numGetValueGradientCalls;} /** Counts how many times this trainer has computed the * log probability of training labels. */ public int getValueCalls() {return numGetValueCalls;} // public int getIterations() {return maximizerByGradient.getIterations();} public MaxEntOptimizableByLabelLikelihood useGaussianPrior () { this.usingGaussianPrior = true; this.usingHyperbolicPrior = false; return this; } public MaxEntOptimizableByLabelLikelihood useHyperbolicPrior () { this.usingGaussianPrior = false; this.usingHyperbolicPrior = true; return this; } /** * In some cases a prior term is implemented in the optimizer, * (eg orthant-wise L-BFGS), so we occasionally want to only * calculate the log likelihood. */ public MaxEntOptimizableByLabelLikelihood useNoPrior () { this.usingGaussianPrior = false; this.usingHyperbolicPrior = false; return this; } /** * Sets a parameter to prevent overtraining. A smaller variance for the prior * means that feature weights are expected to hover closer to 0, so extra * evidence is required to set a higher weight. * @return This trainer */ public MaxEntOptimizableByLabelLikelihood setGaussianPriorVariance (double gaussianPriorVariance) { this.usingGaussianPrior = true; this.usingHyperbolicPrior = false; this.gaussianPriorVariance = gaussianPriorVariance; return this; } public MaxEntOptimizableByLabelLikelihood setHyperbolicPriorSlope (double hyperbolicPriorSlope) { this.usingGaussianPrior = false; this.usingHyperbolicPrior = true; this.hyperbolicPriorSlope = hyperbolicPriorSlope; return this; } public MaxEntOptimizableByLabelLikelihood setHyperbolicPriorSharpness (double hyperbolicPriorSharpness) { this.usingGaussianPrior = false; this.usingHyperbolicPrior = true; this.hyperbolicPriorSharpness = hyperbolicPriorSharpness; return this; } }
12,904
36.297688
115
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/MaxEnt.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.PrintWriter; import java.io.Serializable; import java.io.PrintStream; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.DenseVector; import cc.mallet.types.FeatureSelection; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.LabelAlphabet; import cc.mallet.types.LabelVector; import cc.mallet.types.MatrixOps; import cc.mallet.types.RankedFeatureVector; /** * Maximum Entropy (AKA Multivariate Logistic Regression) classifier. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class MaxEnt extends Classifier implements Serializable { static final double DEFAULT_TEMPERATURE = 1.0; protected double [] parameters; // indexed by <labelIndex,featureIndex> protected int defaultFeatureIndex; protected FeatureSelection featureSelection; protected FeatureSelection[] perClassFeatureSelection; // The default feature is always the feature with highest index public MaxEnt (Pipe dataPipe, double[] parameters, FeatureSelection featureSelection, FeatureSelection[] perClassFeatureSelection) { super (dataPipe); assert (featureSelection == null || perClassFeatureSelection == null); if (parameters != null) this.parameters = parameters; else this.parameters = new double[getNumParameters(dataPipe)]; this.featureSelection = featureSelection; this.perClassFeatureSelection = perClassFeatureSelection; this.defaultFeatureIndex = dataPipe.getDataAlphabet().size(); // assert (parameters.getNumCols() == defaultFeatureIndex+1); } public MaxEnt (Pipe dataPipe, double[] parameters, FeatureSelection featureSelection) { this (dataPipe, parameters, featureSelection, null); } public MaxEnt (Pipe dataPipe, double[] parameters, FeatureSelection[] perClassFeatureSelection) { this (dataPipe, parameters, null, perClassFeatureSelection); } public MaxEnt (Pipe dataPipe, double[] parameters) { this (dataPipe, parameters, null, null); } public double[] getParameters () { return parameters; } public int getNumParameters () { assert (this.instancePipe.getDataAlphabet() != null); assert (this.instancePipe.getTargetAlphabet() != null); return MaxEnt.getNumParameters(this.instancePipe); } public static int getNumParameters (Pipe instancePipe) { return (instancePipe.getDataAlphabet().size() + 1) * instancePipe.getTargetAlphabet().size(); } public void setParameters(double[] parameters){ this.parameters = parameters; } public void setParameter (int classIndex, int featureIndex, double value) { parameters[classIndex*(getAlphabet().size()+1) + featureIndex] = value; } public FeatureSelection getFeatureSelection() { return featureSelection; } public MaxEnt setFeatureSelection (FeatureSelection fs) { featureSelection = fs; return this; } public FeatureSelection[] getPerClassFeatureSelection(){ return perClassFeatureSelection; } public MaxEnt setPerClassFeatureSelection (FeatureSelection[] fss){ this.perClassFeatureSelection = fss; return this; } public int getDefaultFeatureIndex(){ return defaultFeatureIndex; } public void setDefaultFeatureIndex(int defaultFeatureIndex){ this.defaultFeatureIndex = defaultFeatureIndex; } public void getUnnormalizedClassificationScores (Instance instance, double[] scores) { // arrayOutOfBounds if pipe has grown since training // int numFeatures = getAlphabet().size() + 1; int numFeatures = this.defaultFeatureIndex + 1; int numLabels = getLabelAlphabet().size(); assert (scores.length == numLabels); FeatureVector fv = (FeatureVector) instance.getData (); // Make sure the feature vector's feature dictionary matches // what we are expecting from our data pipe (and thus our notion // of feature probabilities. assert (fv.getAlphabet () == this.instancePipe.getDataAlphabet ()); // Include the feature weights according to each label for (int li = 0; li < numLabels; li++) { scores[li] = parameters[li*numFeatures + defaultFeatureIndex] + MatrixOps.rowDotProduct (parameters, numFeatures, li, fv, defaultFeatureIndex, (perClassFeatureSelection == null ? featureSelection : perClassFeatureSelection[li])); } } public void getClassificationScores (Instance instance, double[] scores) { getUnnormalizedClassificationScores(instance, scores); // Move scores to a range where exp() is accurate, and normalize int numLabels = getLabelAlphabet().size(); double max = MatrixOps.max (scores); double sum = 0; for (int li = 0; li < numLabels; li++) sum += (scores[li] = Math.exp (scores[li] - max)); for (int li = 0; li < numLabels; li++) { scores[li] /= sum; // xxxNaN assert (!Double.isNaN(scores[li])); } } //modified by Limin Yao, to deal with decreasing the peak of some labels public void getClassificationScoresWithTemperature (Instance instance, double temperature, double[] scores) { getUnnormalizedClassificationScores(instance, scores); //scores should be divided by temperature, scores are sum of weighted features MatrixOps.timesEquals(scores, 1/temperature); // Move scores to a range where exp() is accurate, and normalize int numLabels = getLabelAlphabet().size(); double max = MatrixOps.max (scores); double sum = 0; for (int li = 0; li < numLabels; li++) sum += (scores[li] = Math.exp (scores[li] - max)); for (int li = 0; li < numLabels; li++) { scores[li] /= sum; // xxxNaN assert (!Double.isNaN(scores[li])); } } //modified by Limin Yao, using temperature classification score public Classification classify (Instance instance) { int numClasses = getLabelAlphabet().size(); double[] scores = new double[numClasses]; //getClassificationScores (instance, scores); getClassificationScoresWithTemperature (instance, DEFAULT_TEMPERATURE, scores); // Create and return a Classification object return new Classification (instance, this, new LabelVector (getLabelAlphabet(), scores)); } public void print () { print(System.out); } public void print (PrintStream out) { final Alphabet dict = getAlphabet(); final LabelAlphabet labelDict = getLabelAlphabet(); int numFeatures = dict.size() + 1; int numLabels = labelDict.size(); // Include the feature weights according to each label for (int li = 0; li < numLabels; li++) { out.println ("FEATURES FOR CLASS "+labelDict.lookupObject (li)); out.println (" <default> "+parameters [li*numFeatures + defaultFeatureIndex]); for (int i = 0; i < defaultFeatureIndex; i++) { Object name = dict.lookupObject (i); double weight = parameters [li*numFeatures + i]; out.println (" "+name+" "+weight); } } } //printRank, added by Limin Yao public void printRank (PrintWriter out) { final Alphabet dict = getAlphabet(); final LabelAlphabet labelDict = getLabelAlphabet(); int numFeatures = dict.size() + 1; int numLabels = labelDict.size(); // Include the feature weights according to each label RankedFeatureVector rfv; double[] weights = new double[numFeatures-1]; // do not deal with the default feature for (int li = 0; li < numLabels; li++) { out.print ("FEATURES FOR CLASS "+labelDict.lookupObject (li) + " "); for (int i = 0; i < defaultFeatureIndex; i++) { double weight = parameters [li*numFeatures + i]; weights[i] = weight; } rfv = new RankedFeatureVector(dict,weights); rfv.printByRank(out); out.println (" <default> "+parameters [li*numFeatures + defaultFeatureIndex] + " "); } } public void printExtremeFeatures (PrintWriter out,int num) { final Alphabet dict = getAlphabet(); final LabelAlphabet labelDict = getLabelAlphabet(); int numFeatures = dict.size() + 1; int numLabels = labelDict.size(); // Include the feature weights according to each label RankedFeatureVector rfv; double[] weights = new double[numFeatures-1]; // do not deal with the default feature for (int li = 0; li < numLabels; li++) { out.print ("FEATURES FOR CLASS "+labelDict.lookupObject (li) + " "); for (int i = 0; i < defaultFeatureIndex; i++) { Object name = dict.lookupObject (i); double weight = parameters [li*numFeatures + i]; weights[i] = weight; } rfv = new RankedFeatureVector(dict,weights); rfv.printTopK(out,num); out.print (" <default> "+parameters [li*numFeatures + defaultFeatureIndex] + " "); rfv.printLowerK(out, num); out.println(); } } private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; static final int NULL_INTEGER = -1; private void writeObject(ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); out.writeObject(getInstancePipe()); int np = parameters.length; out.writeInt(np); for (int p = 0; p < np; p++) out.writeDouble(parameters[p]); out.writeInt(defaultFeatureIndex); if (featureSelection == null) out.writeInt(NULL_INTEGER); else { out.writeInt(1); out.writeObject(featureSelection); } if (perClassFeatureSelection == null) out.writeInt(NULL_INTEGER); else { out.writeInt(perClassFeatureSelection.length); for (int i = 0; i < perClassFeatureSelection.length; i++) if (perClassFeatureSelection[i] == null) out.writeInt(NULL_INTEGER); else { out.writeInt(1); out.writeObject(perClassFeatureSelection[i]); } } } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt(); if (version != CURRENT_SERIAL_VERSION) throw new ClassNotFoundException("Mismatched MaxEnt versions: wanted " + CURRENT_SERIAL_VERSION + ", got " + version); instancePipe = (Pipe) in.readObject(); int np = in.readInt(); parameters = new double[np]; for (int p = 0; p < np; p++) parameters[p] = in.readDouble(); defaultFeatureIndex = in.readInt(); int opt = in.readInt(); if (opt == 1) featureSelection = (FeatureSelection)in.readObject(); int nfs = in.readInt(); if (nfs >= 0) { perClassFeatureSelection = new FeatureSelection[nfs]; for (int i = 0; i < nfs; i++) { opt = in.readInt(); if (opt == 1) perClassFeatureSelection[i] = (FeatureSelection)in.readObject(); } } } }
11,021
30.947826
108
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/AdaBoostM2.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.io.Serializable; import cc.mallet.pipe.*; import cc.mallet.types.*; /** * AdaBoostM2 * * <p>Yoav Freund and Robert E. Schapire * "Experiments with a New Boosting Algorithm" * In Journal of Machine Learning: Proceedings of the 13th International Conference, 1996 * http://www.cs.princeton.edu/~schapire/papers/FreundSc96b.ps.Z * * @author Gary Huang <a href="mailto:[email protected]">[email protected]</a> */ public class AdaBoostM2 extends Classifier implements Serializable { private static final long serialVersionUID = 1L; Classifier[] weakClassifiers; double[] alphas; public AdaBoostM2 (Pipe instancePipe, Classifier[] weakClassifiers, double[] alphas) { super (instancePipe); this.weakClassifiers = weakClassifiers; this.alphas = alphas; } /** * Get the number of weak classifiers in this ensemble classifier */ public int getNumWeakClassifiers() { return alphas.length; } /** * Return an AdaBoostM2 classifier that uses only the first * <tt>numWeakClassifiersToUse</tt> weak learners. * * <p>The returned classifier's Pipe and weak classifiers * are backed by the respective objects of this classifier, * so changes to the returned classifier's Pipe and weak * classifiers are reflected in this classifier, and vice versa. */ public AdaBoostM2 getTrimmedClassifier(int numWeakClassifiersToUse) { if (numWeakClassifiersToUse <= 0 || numWeakClassifiersToUse > weakClassifiers.length) throw new IllegalArgumentException("number of weak learners to use out of range:" + numWeakClassifiersToUse); Classifier[] newWeakClassifiers = new Classifier[numWeakClassifiersToUse]; System.arraycopy(weakClassifiers, 0, newWeakClassifiers, 0, numWeakClassifiersToUse); double[] newAlphas = new double[numWeakClassifiersToUse]; System.arraycopy(alphas, 0, newAlphas, 0, numWeakClassifiersToUse); return new AdaBoostM2(instancePipe, newWeakClassifiers, newAlphas); } public Classification classify (Instance inst) { return classify(inst, weakClassifiers.length); } /** * Classify the given instance using only the first * <tt>numWeakClassifiersToUse</tt> classifiers * trained during boosting */ public Classification classify (Instance inst, int numWeakClassifiersToUse) { if (numWeakClassifiersToUse <= 0 || numWeakClassifiersToUse > weakClassifiers.length) throw new IllegalArgumentException("number of weak learners to use out of range:" + numWeakClassifiersToUse); FeatureVector fv = (FeatureVector) inst.getData(); assert (instancePipe == null || fv.getAlphabet () == this.instancePipe.getDataAlphabet ()); int numClasses = getLabelAlphabet().size(); double[] scores = new double[numClasses]; int bestIndex; double sum = 0; // Gather scores of all weakClassifiers for (int round = 0; round < numWeakClassifiersToUse; round++) { bestIndex = weakClassifiers[round].classify(inst).getLabeling().getBestIndex(); scores[bestIndex] += alphas[round]; sum += scores[bestIndex]; } // Normalize the scores for (int i = 0; i < scores.length; i++) scores[i] /= sum; return new Classification (inst, this, new LabelVector (getLabelAlphabet(), scores)); } }
3,902
35.476636
96
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/BalancedWinnowTrainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.io.Serializable; import java.util.Arrays; import cc.mallet.types.Alphabet; import cc.mallet.types.FeatureSelection; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.Labeling; /** * An implementation of the training methods of a BalancedWinnow * on-line classifier. Given a labeled instance (x, y) the algorithm * computes dot(x, wi), for w1, ... , wc where wi is the weight * vector for class i. The instance is classified as class j * if the value of dot(x, wj) is the largest among the c dot * products. * * <p>The weight vectors are updated whenever the the classifier * makes a mistake or just barely got the correct answer (highest * dot product is within delta percent higher than the second highest). * Suppose the classifier guessed j and answer was j'. For each * feature i that is present, multiply w_ji by (1-epsilon) and * multiply w_j'i by (1+epsilon) * * <p>The above procedure is done multiple times to the training * examples (default is 5), and epsilon is cut by the cooling * rate at each iteration (default is cutting epsilon by half). * * @author Gary Huang <a href="mailto:[email protected]">[email protected]</a> */ public class BalancedWinnowTrainer extends ClassifierTrainer<BalancedWinnow> implements Boostable, Serializable { private static final long serialVersionUID = 1L; /** * 0.5 */ public static final double DEFAULT_EPSILON = .5; /** * 0.1 */ public static final double DEFAULT_DELTA = .1; /** * 30 */ public static final int DEFAULT_MAX_ITERATIONS = 30; /** * 0.5 */ public static final double DEFAULT_COOLING_RATE = .5; double m_epsilon; double m_delta; int m_maxIterations; double m_coolingRate; /** * Array of weights, one for each class and feature, initialized to 1. * For each class, there is an additional default "feature" weight * that is set to 1 in every example (it remains constant; this is * used to prevent the instance from having 0 dot product with a class). */ double[][] m_weights; BalancedWinnow classifier; public BalancedWinnow getClassifier () { return classifier; } /** * Default constructor. Sets all features to defaults. */ public BalancedWinnowTrainer() { this(DEFAULT_EPSILON, DEFAULT_DELTA, DEFAULT_MAX_ITERATIONS, DEFAULT_COOLING_RATE); } /** * @param epsilon percentage by which to increase/decrease weight vectors * when an example is misclassified. * @param delta percentage by which the highest (and correct) dot product * should exceed the second highest dot product before we consider an example * to be correctly classified (margin width) when adjusting weights. * @param maxIterations maximum number of times to loop through training examples. * @param coolingRate percentage of epsilon to decrease after each iteration */ public BalancedWinnowTrainer(double epsilon, double delta, int maxIterations, double coolingRate) { m_epsilon = epsilon; m_delta = delta; m_maxIterations = maxIterations; m_coolingRate = coolingRate; } /** * Trains the classifier on the instance list, updating * class weight vectors as appropriate * @param trainingList Instance list to be trained on * @return Classifier object containing learned weights */ public BalancedWinnow train (InstanceList trainingList) { FeatureSelection selectedFeatures = trainingList.getFeatureSelection(); if (selectedFeatures != null) // xxx Attend to FeatureSelection!!! throw new UnsupportedOperationException ("FeatureSelection not yet implemented."); double epsilon = m_epsilon; Alphabet dict = (Alphabet) trainingList.getDataAlphabet (); int numLabels = trainingList.getTargetAlphabet().size(); int numFeats = dict.size(); m_weights = new double [numLabels][numFeats+1]; // init weights to 1 for(int i = 0; i < numLabels; i++) Arrays.fill(m_weights[i], 1.0); // Loop through training instances multiple times double[] results = new double[numLabels]; for (int iter = 0; iter < m_maxIterations; iter++) { // loop through all instances for (int ii = 0; ii < trainingList.size(); ii++) { Instance inst = trainingList.get(ii); Labeling labeling = inst.getLabeling (); FeatureVector fv = (FeatureVector) inst.getData(); int fvisize = fv.numLocations(); int correctIndex = labeling.getBestIndex(); Arrays.fill(results, 0); // compute dot(x, wi) for each class i for(int lpos = 0; lpos < numLabels; lpos++) { for(int fvi = 0; fvi < fvisize; fvi++) { int fi = fv.indexAtLocation(fvi); double vi = fv.valueAtLocation(fvi); results[lpos] += vi * m_weights[lpos][fi]; } // This extra value comes from the extra // "feature" present in all examples results[lpos] += m_weights[lpos][numFeats]; } // Get indices of the classes with the 2 highest dot products int predictedIndex = 0; int secondHighestIndex = 0; double max = Double.MIN_VALUE; double secondMax = Double.MIN_VALUE; for (int i = 0; i < numLabels; i++) { if (results[i] > max) { secondMax = max; max = results[i]; secondHighestIndex = predictedIndex; predictedIndex = i; } else if (results[i] > secondMax) { secondMax = results[i]; secondHighestIndex = i; } } // Adjust weights if this example is mispredicted // or just barely correct if (predictedIndex != correctIndex) { for (int fvi = 0; fvi < fvisize; fvi++) { int fi = fv.indexAtLocation(fvi); m_weights[predictedIndex][fi] *= (1 - epsilon); m_weights[correctIndex][fi] *= (1 + epsilon); } m_weights[predictedIndex][numFeats] *= (1 - epsilon); m_weights[correctIndex][numFeats] *= (1 + epsilon); } else if (max/secondMax - 1 < m_delta) { for (int fvi = 0; fvi < fvisize; fvi++) { int fi = fv.indexAtLocation(fvi); m_weights[secondHighestIndex][fi] *= (1 - epsilon); m_weights[correctIndex][fi] *= (1 + epsilon); } m_weights[secondHighestIndex][numFeats] *= (1 - epsilon); m_weights[correctIndex][numFeats] *= (1 + epsilon); } } // Cut epsilon by the cooling rate epsilon *= (1-m_coolingRate); } this.classifier = new BalancedWinnow (trainingList.getPipe(), m_weights); return classifier; } }
6,860
32.305825
111
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/Classification.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify; import java.io.File; import java.io.FileNotFoundException; import java.io.PrintWriter; import java.io.Serializable; import cc.mallet.classify.Classifier; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.LabelVector; import cc.mallet.types.Labeling; /** * The result of classifying a single instance. * Contains the instance, the classifier used, and the labeling the * classifier produced. * Also has methods for comparing the correct (true) label contained in the * target field of the instance with the one produced by the classifier. */ public class Classification implements Serializable { Instance instance; Classifier classifier; Labeling labeling; public Classification (Instance instance, Classifier classifier, Labeling labeling) { this.instance = instance; this.classifier = classifier; this.labeling = labeling; } public Instance getInstance () { return instance; } public Classifier getClassifier () { return classifier; } public Labeling getLabeling () { return labeling; } public LabelVector getLabelVector () { return labeling.toLabelVector(); } public boolean bestLabelIsCorrect () { Labeling correctLabeling = instance.getLabeling(); if (correctLabeling == null) throw new IllegalStateException ("Instance has no label."); return (labeling.getBestLabel().equals (correctLabeling.getBestLabel())); } public double valueOfCorrectLabel () { Labeling correctLabeling = instance.getLabeling(); int correctLabelIndex = correctLabeling.getBestIndex(); return labeling.value (correctLabelIndex); } public void print() { //not implemented } public void print (PrintWriter pw) throws FileNotFoundException { // xxx Fix this. /*System.out.print (classifier.getClass().getName() + "(."); System.out.print (") = ["); for (int i = 0; i < labeling.numLocations(); i++) System.out.print (labeling.labelAtLocation(i).toString()+"="+labeling.valueAtLocation(i)+" "); System.out.println ("]");*/ pw.print(classifier.getClass().getName()); pw.print(" "); pw.print(instance.getSource() + " "); for (int i = 0; i < labeling.numLocations(); i++) pw.print (labeling.labelAtLocation(i).toString()+"="+labeling.valueAtLocation(i)+" "); pw.println (); } public void printRank (PrintWriter pw) throws FileNotFoundException { // xxx Fix this. /*System.out.print (classifier.getClass().getName() + "(."); System.out.print (") = ["); for (int i = 0; i < labeling.numLocations(); i++) System.out.print (labeling.labelAtLocation(i).toString()+"="+labeling.valueAtLocation(i)+" "); System.out.println ("]");*/ pw.print(classifier.getClass().getName()); pw.print(" "); pw.print(instance.getSource() + " "); LabelVector lv = labeling.toLabelVector(); lv.printByRank(pw); pw.println (); } public Instance toInstance() { Instance ret; FeatureVector fv; double[] values = new double[labeling.numLocations()]; int[] indices = new int[labeling.numLocations()]; for(int i = 0; i < labeling.numLocations(); i++){ indices[i] = labeling.indexAtLocation(i); values[i] = labeling.valueAtLocation(i); } fv = new FeatureVector(labeling.getAlphabet(), indices, values); ret = new Instance(fv,null,null,instance.getSource()); return ret; } }
3,881
28.18797
97
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/BaggingClassifier.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import cc.mallet.pipe.*; import cc.mallet.types.*; /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class BaggingClassifier extends Classifier { Classifier[] baggedClassifiers; double[] weights; // Not yet implemented! public BaggingClassifier (Pipe instancePipe, Classifier[] baggedClassifiers) { super (instancePipe); this.baggedClassifiers = baggedClassifiers; } public Classification classify (Instance inst) { int numClasses = getLabelAlphabet().size(); double[] scores = new double[numClasses]; int bestIndex; double sum = 0; for (int i = 0; i < baggedClassifiers.length; i++) { Labeling labeling = baggedClassifiers[i].classify(inst).getLabeling(); labeling.addTo (scores); } MatrixOps.normalize (scores); return new Classification (inst, this, new LabelVector (getLabelAlphabet(), scores)); } }
1,359
29.222222
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/DecisionTree.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.io.Serializable; import java.util.logging.Logger; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.AugmentableFeatureVector; import cc.mallet.types.FeatureSelection; import cc.mallet.types.FeatureVector; import cc.mallet.types.InfoGain; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.Labeling; import cc.mallet.util.MalletLogger; /** Decision Tree classifier. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class DecisionTree extends Classifier implements Serializable //implements InduceFeatures { private static final long serialVersionUID = 1L; private static Logger logger = MalletLogger.getLogger(DecisionTree.class.getName()); Node root; public DecisionTree (Pipe instancePipe, DecisionTree.Node root) { super (instancePipe); this.root = root; } public Node getRoot () { return root; } private Node getLeaf (Node node, FeatureVector fv) { if (node.child0 == null) return node; else if (fv.value (node.featureIndex) != 0) return getLeaf (node.child1, fv); else return getLeaf (node.child0, fv); } public Classification classify (Instance instance) { FeatureVector fv = (FeatureVector) instance.getData (); assert (instancePipe == null || fv.getAlphabet () == this.instancePipe.getDataAlphabet ()); Node leaf = getLeaf (root, fv); return new Classification (instance, this, leaf.labeling); } // Entropy of 1.0 would say that it take "one bit" to indicate the correct class, // e.g. that there is a 50/50 split between two classes given a particular feature public double addFeaturesClassEntropyThreshold = 0.7; public void induceFeatures (InstanceList ilist, boolean withFeatureShrinkage, boolean inducePerClassFeatures) { if (inducePerClassFeatures) { int numClasses = ilist.getTargetAlphabet().size(); // int numFeatures = ilist.getDataAlphabet().size(); FeatureSelection[] pcfs = new FeatureSelection[numClasses]; for (int j = 0; j < numClasses; j++) pcfs[j] = (FeatureSelection) ilist.getPerLabelFeatureSelection()[j].clone(); for (int i = 0; i < ilist.size(); i++) { Object data = ilist.get(i).getData(); AugmentableFeatureVector afv = (AugmentableFeatureVector) data; root.induceFeatures (afv, null, pcfs, ilist.getFeatureSelection(), ilist.getPerLabelFeatureSelection(), withFeatureShrinkage, inducePerClassFeatures, addFeaturesClassEntropyThreshold); } } else { throw new UnsupportedOperationException ("Not yet implemented"); } } public static class Node implements Serializable { private static final long serialVersionUID = 1L; int featureIndex; // the feature on which the children (would) distinguish double infoGain; // the information gain of splitting on this feature InstanceList ilist; Alphabet dictionary; double labelEntropy; // the class label entropy of data in this (unsplit) node Labeling labeling; // the class label distribution in the node (unsplit) Node parent, child0, child1; String name; // xxx Also calculate some sort of inverted entropy for feature induction, // in order to find the one class needs a new feature with a negative weight. public Node (InstanceList ilist, Node parent, FeatureSelection fs) { InfoGain ig = new InfoGain (ilist); this.featureIndex = ig.getMaxValuedIndexIn (fs); this.infoGain = ig.value(featureIndex); this.ilist = ilist; this.dictionary = ilist.getDataAlphabet(); this.parent = parent; this.labeling = ig.getBaseLabelDistribution(); this.labelEntropy = ig.getBaseEntropy(); this.child0 = this.child1 = null; } /** The root has depth zero. */ public int depth () { int depth = 0; Node p = parent; while (p != null) { p = p.parent; depth++; } return depth; } public boolean isLeaf () { return (child0 == null && child1 == null); } public boolean isRoot () { return parent == null; } public Node getFeatureAbsentChild () { return child0; } public Node getFeaturePresentChild () { return child1; } public double getSplitInfoGain () { return infoGain; } public Object getSplitFeature () { return ilist.getDataAlphabet().lookupObject(featureIndex); } public void split (FeatureSelection fs) { if (ilist == null) throw new IllegalStateException ("Frozen. Cannot split."); InstanceList ilist0 = new InstanceList (ilist.getPipe()); InstanceList ilist1 = new InstanceList (ilist.getPipe()); for (int i = 0; i < ilist.size(); i++) { Instance instance = ilist.get(i); FeatureVector fv = (FeatureVector) instance.getData (); // xxx What test should this be? What to do with negative values? // Whatever is decided here should also go in InfoGain.calcInfoGains() if (fv.value (featureIndex) != 0) { //System.out.println ("list1 add "+instance.getUri()+" weight="+ilist.getInstanceWeight(i)); ilist1.add (instance, ilist.getInstanceWeight(i)); } else { //System.out.println ("list0 add "+instance.getUri()+" weight="+ilist.getInstanceWeight(i)); ilist0.add (instance, ilist.getInstanceWeight(i)); } } logger.info("child0="+ilist0.size()+" child1="+ilist1.size()); child0 = new Node (ilist0, this, fs); child1 = new Node (ilist1, this, fs); } // Saves memory by allowing ilist to be garbage collected public void stopGrowth () { if (child0 != null) { child0.stopGrowth(); child1.stopGrowth(); } ilist = null; } public void induceFeatures (AugmentableFeatureVector afv, FeatureSelection featuresAlreadyThere, FeatureSelection[] perClassFeaturesAlreadyThere, FeatureSelection newFeatureSelection, FeatureSelection[] perClassNewFeatureSelection, boolean withInteriorNodes, boolean addPerClassFeatures, double classEntropyThreshold) { if (!isRoot() && (isLeaf() || withInteriorNodes) && labelEntropy < classEntropyThreshold) { String name = getName(); logger.info("Trying to add feature "+name); //int conjunctionIndex = afv.getAlphabet().lookupIndex (name, false); if (addPerClassFeatures) { int classIndex = labeling.getBestIndex(); if (!perClassFeaturesAlreadyThere[classIndex].contains (name)) { afv.add (name, 1.0); perClassNewFeatureSelection[classIndex].add (name); } } else { throw new UnsupportedOperationException ("Not yet implemented."); } } boolean featurePresent = afv.value (featureIndex) != 0; if (child0 != null && !featurePresent) child0.induceFeatures (afv, featuresAlreadyThere, perClassFeaturesAlreadyThere, newFeatureSelection, perClassNewFeatureSelection, withInteriorNodes, addPerClassFeatures, classEntropyThreshold); if (child1 != null && featurePresent) child1.induceFeatures (afv, featuresAlreadyThere, perClassFeaturesAlreadyThere, newFeatureSelection, perClassNewFeatureSelection, withInteriorNodes, addPerClassFeatures, classEntropyThreshold); } public String getName () { // String prefix; if (parent == null) return "root"; else if (parent.parent == null) { if (parent.getFeaturePresentChild() == this) return dictionary.lookupObject(parent.featureIndex).toString(); else { assert (dictionary != null); assert (dictionary.lookupObject(parent.featureIndex) != null); return "!" + dictionary.lookupObject(parent.featureIndex).toString(); } } else { if (parent.getFeaturePresentChild() == this) return parent.getName() + "&" + dictionary.lookupObject(parent.featureIndex).toString(); else return parent.getName() + "&!" + dictionary.lookupObject(parent.featureIndex).toString(); } } public void print () { if (child0 == null) System.out.println (getName() + ": " + labeling.getBestLabel()); else { child0.print(); child1.print(); } } } }
8,589
32.818898
110
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/C45Trainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.util.logging.Logger; import cc.mallet.classify.Boostable; import cc.mallet.classify.Classifier; import cc.mallet.classify.ClassifierTrainer; import cc.mallet.types.FeatureSelection; import cc.mallet.types.InstanceList; import cc.mallet.util.MalletLogger; import cc.mallet.util.Maths; /** * A C4.5 decision tree learner, approximtely. Currently treats all * features as continuous-valued, and has no notion of missing values.<p> * * This implementation uses MDL for pruning.<p> * * J. R. Quinlan<br> * "Improved Use of Continuous Attributes in C4.5" <br> * ftp://ftp.cs.cmu.edu/project/jair/volume4/quinlan96a.ps<p> * * J. R. Quinlan and R. L. Rivest<br> * "Inferring Decision Trees Using Minimum Description Length Principle" * * @author Gary Huang <a href="mailto:[email protected]">[email protected]</a> */ public class C45Trainer extends ClassifierTrainer<C45> implements Boostable { private static Logger logger = MalletLogger.getLogger(C45Trainer.class.getName()); boolean m_depthLimited = false; int m_maxDepth = 4; int m_minNumInsts = 2; // minimum number of instances in each node boolean m_doPruning = true; C45 classifier; public C45 getClassifier () { return classifier; } /** * Uses default values: not depth limited tree with * a minimum of 2 instances in each leaf node */ public C45Trainer() {} /** * Construct a depth-limited tree with the given depth limit */ public C45Trainer(int maxDepth) { m_maxDepth = maxDepth; m_depthLimited = true; } public C45Trainer(boolean doPruning) { m_doPruning = doPruning; } public C45Trainer(int maxDepth, boolean doPruning) { m_depthLimited = true; m_maxDepth = maxDepth; m_doPruning = doPruning; } public void setDoPruning(boolean doPruning) { m_doPruning = doPruning; } public boolean getDoPruning() { return m_doPruning; } public void setDepthLimited(boolean depthLimited) { m_depthLimited = depthLimited; } public boolean getDepthLimited() { return m_depthLimited; } public void setMaxDepth(int maxDepth) { m_maxDepth = maxDepth; } public int getMaxDepth() { return m_maxDepth; } public void setMinNumInsts(int minNumInsts) { m_minNumInsts = minNumInsts; } public int getMinNumInsts() { return m_minNumInsts; } protected void splitTree(C45.Node node, int depth) { // Stop growing the tree when any of the following is true: // 1. We care about tree depth and maximum depth is reached // 2. The entropy of the node is too small (i.e., all // instances belong to the same class) // 3. The gain ratio of the best split available is too small if (m_depthLimited && depth == m_maxDepth) { logger.info("Splitting stopped: maximum depth reached (" + m_maxDepth + ")"); return; } else if (Maths.almostEquals(node.getGainRatio().getBaseEntropy(), 0)) { logger.info("Splitting stopped: entropy of node too small (" + node.getGainRatio().getBaseEntropy() + ")"); return; } else if (Maths.almostEquals(node.getGainRatio().getMaxValue(), 0)) { logger.info("Splitting stopped: node has insignificant gain ratio (" + node.getGainRatio().getMaxValue() + ")"); return; } logger.info("Splitting feature \""+node.getSplitFeature() +"\" at threshold=" + node.getGainRatio().getMaxValuedThreshold() + " gain ratio="+node.getGainRatio().getMaxValue()); node.split(); splitTree(node.getLeftChild(), depth+1); splitTree(node.getRightChild(), depth+1); } public C45 train (InstanceList trainingList) { FeatureSelection selectedFeatures = trainingList.getFeatureSelection(); if (selectedFeatures != null) // xxx Attend to FeatureSelection!!! throw new UnsupportedOperationException ("FeatureSelection not yet implemented."); C45.Node root = new C45.Node(trainingList, null, m_minNumInsts); splitTree(root, 0); C45 tree = new C45 (trainingList.getPipe(), root); logger.info("C45 learned: (size=" + tree.getSize() + ")\n"); tree.print(); if (m_doPruning) { tree.prune(); logger.info("\nPruned C45: (size=" + tree.getSize() + ")\n"); root.print(); } root.stopGrowth(); this.classifier = tree; return classifier; } }
4,656
27.224242
115
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/Winnow.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Aron Culotta <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify; import cc.mallet.pipe.Pipe; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.LabelVector; /** * Classification methods of Winnow2 algorithm. * @see WinnowTrainer */ public class Winnow extends Classifier{ /** *array of weights, one for each feature, initialized to 1 */ double [][] weights; /** *threshold for sum of wi*xi in formulating guess */ double theta; /** * Passes along data pipe and weights from * {@link #WinnowTrainer WinnowTrainer} * @param dataPipe needed for dictionary, labels, feature vectors, etc * @param newWeights weights calculated during training phase * @param theta value used for threshold * @param idim i dimension of weights array * @param jdim j dimension of weights array */ public Winnow (Pipe dataPipe, double [][]newWeights, double theta, int idim, int jdim){ super (dataPipe); this.theta = theta; this.weights = new double[idim][jdim]; for(int i=0; i<idim; i++) for(int j=0; j<jdim; j++) this.weights[i][j] = newWeights[i][j]; } /** * Classifies an instance using Winnow's weights * @param instance an instance to be classified * @return an object containing the classifier's guess */ public Classification classify (Instance instance){ int numClasses = getLabelAlphabet().size(); double[] scores = new double[numClasses]; FeatureVector fv = (FeatureVector) instance.getData (); // Make sure the feature vector's feature dictionary matches // what we are expecting from our data pipe (and thus our notion // of feature probabilities. assert (instancePipe == null || fv.getAlphabet () == this.instancePipe.getDataAlphabet ()); int fvisize = fv.numLocations(); // Set the scores by summing wi*xi for (int fvi = 0; fvi < fvisize; fvi++) { int fi = fv.indexAtLocation (fvi); for (int ci = 0; ci < numClasses; ci++) scores[ci] += this.weights[ci][fi]; } // Create and return a Classification object return new Classification (instance, this, new LabelVector (getLabelAlphabet(), scores)); } }
2,696
30.729412
93
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/NaiveBayes.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.io.ObjectOutputStream; import java.io.ObjectInputStream; import java.io.Serializable; import java.io.IOException; import java.util.Arrays; import cc.mallet.classify.Classifier; import cc.mallet.pipe.Pipe; import cc.mallet.types.*; import cc.mallet.types.Multinomial.Logged; /** * A classifier that classifies instances according to the NaiveBayes method. * In an Bayes classifier, * the p(Classification|Data) = p(Data|Classification)p(Classification)/p(Data) * <p> * To compute the likelihood: <br> * p(Data|Classification) = p(d1,d2,..dn | Classification) <br> * Naive Bayes makes the assumption that all of the data are conditionally * independent given the Classification: <br> * p(d1,d2,...dn | Classification) = p(d1|Classification)p(d2|Classification).. <br> * <p> * As with other classifiers in Mallet, NaiveBayes is implemented as two classes: * a trainer and a classifier. The {@link cc.mallet.classify.NaiveBayesTrainer} produces estimates of the various * p(dn|Classifier) and contructs this class with those estimates. * <p> * Instances are assumed to be {@link cc.mallet.types.FeatureVector}s * <p> * As with other Mallet classifiers, classification may only be performed on instances * processed with the pipe associated with this classifer, ie naiveBayes.getPipeInstance(); * The NaiveBayesTrainer sets this pipe to the pipe used to process the training instances. * <p> * A NaiveBayes classifier can be persisted and reused using serialization. * @see NaiveBayesTrainer * @see FeatureVector * * @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class NaiveBayes extends Classifier implements Serializable { Multinomial.Logged prior; Multinomial.Logged[] p; /** * Construct a NaiveBayes classifier from a pipe, prior estimates for each Classification, * and feature estimates of each Classification. A NaiveBayes classifier is generally * generated from a NaiveBayesTrainer, not constructed directly by users. * Proability estimates are converted and saved as logarithms internally. * @param instancePipe Used to check that feature vector dictionary for each instance * is the same as that associated with the pipe. Null suppresses check * @param prior Mulinomial that gives an estimate of the prior probability for * each Classification * @param classIndex2FeatureProb An array of multinomials giving an estimate * of the probability of a classification for each feature of each featurevector. */ public NaiveBayes (Pipe instancePipe, Multinomial.Logged prior, Multinomial.Logged[] classIndex2FeatureProb) { super (instancePipe); this.prior = prior; this.p = classIndex2FeatureProb; } private static Multinomial.Logged[] logMultinomials (Multinomial[] m) { Multinomial.Logged[] ml = new Multinomial.Logged[m.length]; for (int i = 0; i < m.length; i++) ml[i] = new Multinomial.Logged (m[i]); return ml; } /** * Construct a NaiveBayes classifier from a pipe, prior estimates for each Classification, * and feature estimates of each Classification. A NaiveBayes classifier is generally * generated from a NaiveBayesTrainer, not constructed directly by users. * * @param dataPipe Used to check that feature vector dictionary for each instance * is the same as that associated with the pipe. Null suppresses check * @param prior Mulinomial that gives an estimate of the prior probability for * each Classification * @param classIndex2FeatureProb An array of multinomials giving an estimate * of the probability of a classification for each feature of each featurevector. */ public NaiveBayes (Pipe dataPipe, Multinomial prior, Multinomial[] classIndex2FeatureProb) { this (dataPipe, new Multinomial.Logged (prior), logMultinomials (classIndex2FeatureProb)); } public Multinomial.Logged[] getMultinomials() { return p; } public Logged getPriors() { return prior; } public void printWords (int numToPrint) { Alphabet alphabet = instancePipe.getDataAlphabet(); int numFeatures = alphabet.size(); int numLabels = instancePipe.getTargetAlphabet().size(); double[] probs = new double[numFeatures]; numToPrint = Math.min(numToPrint, numFeatures); for (int li = 0; li < numLabels; li++) { Arrays.fill (probs, 0.0); p[li].addProbabilities(probs); RankedFeatureVector rfv = new RankedFeatureVector (alphabet, probs); System.out.println ("\nFeature probabilities "+instancePipe.getTargetAlphabet().lookupObject(li)); for (int i = 0; i < numToPrint; i++) System.out.println (rfv.getObjectAtRank(i)+" "+rfv.getValueAtRank(i)); } } /** * Classify an instance using NaiveBayes according to the trained data. * The alphabet of the featureVector of the instance must match the * alphabe of the pipe used to train the classifier. * @param instance to be classified. Data field must be a FeatureVector * @return Classification containing the labeling of the instance */ public Classification classify (Instance instance) { // Note that the current size of the label alphabet can be larger // than it was at the time of training. We are careful here // to correctly handle those labels here. For example, // we assume the log prior probability of those classes is // minus infinity. int numClasses = getLabelAlphabet().size(); double[] scores = new double[numClasses]; FeatureVector fv = (FeatureVector) instance.getData (); // Make sure the feature vector's feature dictionary matches // what we are expecting from our data pipe (and thus our notion // of feature probabilities. assert (instancePipe == null || fv.getAlphabet () == instancePipe.getDataAlphabet ()); int fvisize = fv.numLocations(); prior.addLogProbabilities (scores); // Set the scores according to the feature weights and per-class probabilities for (int fvi = 0; fvi < fvisize; fvi++) { int fi = fv.indexAtLocation(fvi); for (int ci = 0; ci < numClasses; ci++) { // guard against dataAlphabet or target alphabet growing; can happen if classifying // a never before seen feature. Ignore these. if (ci >= p.length || fi >= p[ci].size()) continue; scores[ci] += fv.valueAtLocation(fvi) * p[ci].logProbability(fi); } } // Get the scores in the range near zero, where exp() is more accurate double maxScore = Double.NEGATIVE_INFINITY; for (int ci = 0; ci < numClasses; ci++) if (scores[ci] > maxScore) maxScore = scores[ci]; for (int ci = 0; ci < numClasses; ci++) scores[ci] -= maxScore; // Exponentiate and normalize double sum = 0; for (int ci = 0; ci < numClasses; ci++) sum += (scores[ci] = Math.exp (scores[ci])); for (int ci = 0; ci < numClasses; ci++) scores[ci] /= sum; // Create and return a Classification object return new Classification (instance, this, new LabelVector (getLabelAlphabet(), scores)); } private double dataLogProbability (Instance instance, int labelIndex) { FeatureVector fv = (FeatureVector) instance.getData (); int fvisize = fv.numLocations(); double logProb = 0; for (int fvi = 0; fvi < fvisize; fvi++) logProb += fv.valueAtLocation(fvi) * p[labelIndex].logProbability(fv.indexAtLocation(fvi)); return logProb; } public double dataLogLikelihood (InstanceList ilist) { double logLikelihood = 0; for (int ii = 0; ii < ilist.size(); ii++) { double instanceWeight = ilist.getInstanceWeight(ii); Instance inst = ilist.get(ii); Labeling labeling = inst.getLabeling (); if (labeling != null) logLikelihood += instanceWeight * dataLogProbability (inst, labeling.getBestIndex()); else { Labeling predicted = this.classify(inst).getLabeling(); //System.err.println ("label = \n"+labeling); //System.err.println ("predicted = \n"+predicted); for (int lpos = 0; lpos < predicted.numLocations(); lpos++) { int li = predicted.indexAtLocation (lpos); double labelWeight = predicted.valueAtLocation (lpos); //System.err.print (", "+labelWeight); if (labelWeight == 0) continue; logLikelihood += instanceWeight * labelWeight * dataLogProbability (inst, li); } } } return logLikelihood; } public double labelLogLikelihood (InstanceList ilist) { double logLikelihood = 0; for (int ii = 0; ii < ilist.size(); ii++) { double instanceWeight = ilist.getInstanceWeight(ii); Instance inst = ilist.get(ii); Labeling labeling = inst.getLabeling (); if (labeling == null) continue; Labeling predicted = this.classify(inst).getLabeling(); //System.err.println ("label = \n"+labeling); //System.err.println ("predicted = \n"+predicted); if (labeling.numLocations() == 1) { logLikelihood += instanceWeight * Math.log (predicted.value(labeling.getBestIndex())); } else { for (int lpos = 0; lpos < labeling.numLocations(); lpos++) { int li = labeling.indexAtLocation (lpos); double labelWeight = labeling.valueAtLocation (lpos); //System.err.print (", "+labelWeight); if (labelWeight == 0) continue; logLikelihood += instanceWeight * labelWeight * Math.log (predicted.value(li)); } } } return logLikelihood; } // Serialization // serialVersionUID is overriden to prevent innocuous changes in this // class from making the serialization mechanism think the external // format has changed. private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject(ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); out.writeObject(getInstancePipe()); // write prior for each class out.writeObject(prior); // write array of conditional probability estimates out.writeObject(p); } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt(); if (version != CURRENT_SERIAL_VERSION) throw new ClassNotFoundException("Mismatched NaiveBayes versions: wanted " + CURRENT_SERIAL_VERSION + ", got " + version); instancePipe = (Pipe) in.readObject(); prior = (Multinomial.Logged) in.readObject(); p = (Multinomial.Logged []) in.readObject(); } }
11,531
40.039146
114
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/ConfidencePredictingClassifier.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify; import java.util.ArrayList; import cc.mallet.classify.Classifier; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.Label; import cc.mallet.types.LabelVector; public class ConfidencePredictingClassifier extends Classifier { Classifier underlyingClassifier; Classifier confidencePredictingClassifier; double totalCorrect; double totalIncorrect; double totalIncorrectIncorrect; double totalIncorrectCorrect; int numCorrectInstances; int numIncorrectInstances; int numConfidenceCorrect; int numFalsePositive; int numFalseNegative; public ConfidencePredictingClassifier (Classifier underlyingClassifier, Classifier confidencePredictingClassifier) { super (underlyingClassifier.getInstancePipe()); this.underlyingClassifier = underlyingClassifier; this.confidencePredictingClassifier = confidencePredictingClassifier; // for testing confidence accuracy totalCorrect = 0.0; totalIncorrect = 0.0; totalIncorrectIncorrect = 0.0; totalIncorrectCorrect = 0.0; numCorrectInstances = 0; numIncorrectInstances = 0; numConfidenceCorrect = 0; numFalsePositive = 0; numFalseNegative = 0; } public Classification classify (Instance instance) { Classification c = underlyingClassifier.classify (instance); Classification cpc = confidencePredictingClassifier.classify (c); LabelVector lv = c.getLabelVector(); int bestIndex = lv.getBestIndex(); double [] values = new double[lv.numLocations()]; //// Put score of "correct" into score of the winning class... // xxx Can't set lv - it's immutable. // Must create copy and new classification object // lv.set (bestIndex, cpc.getLabelVector().value("correct")); //for (int i = 0; i < lv.numLocations(); i++) // if (i != bestIndex) // lv.set (i, 0.0); // Put score of "correct" in winning class and // set rest to 0 for (int i = 0; i < lv.numLocations(); i++) { if (i != bestIndex) values[i] = 0.0; else values[i] = cpc.getLabelVector().value("correct"); } //return c; if(c.bestLabelIsCorrect()){ numCorrectInstances++; totalCorrect+=cpc.getLabelVector().value("correct"); totalIncorrectCorrect+=cpc.getLabelVector().value("incorrect"); String correct = new String("correct"); if(correct.equals(cpc.getLabelVector().getBestLabel().toString())) numConfidenceCorrect++; else numFalseNegative++; } else{ numIncorrectInstances++; totalIncorrect+=cpc.getLabelVector().value("correct"); totalIncorrectIncorrect+=cpc.getLabelVector().value("incorrect"); if((new String("incorrect")).equals(cpc.getLabelVector().getBestLabel().toString())) numConfidenceCorrect++; else numFalsePositive++; } return new Classification(instance, this, new LabelVector(lv.getLabelAlphabet(), values)); // return cpc; } public void printAverageScores() { System.out.println("Mean score of correct for correct instances = " + meanCorrect()); System.out.println("Mean score of correct for incorrect instances = " + meanIncorrect()); System.out.println("Mean score of incorrect for correct instances = " + this.totalIncorrectCorrect/this.numCorrectInstances); System.out.println("Mean score of incorrect for incorrect instances = " + this.totalIncorrectIncorrect/this.numIncorrectInstances); } public void printConfidenceAccuracy() { System.out.println("Confidence predicting accuracy = " + ((double)numConfidenceCorrect/(numIncorrectInstances + numCorrectInstances))+ " false negatives: "+ numFalseNegative + "/"+numCorrectInstances + " false positives: "+ numFalsePositive +" / " +numIncorrectInstances); } public double meanCorrect() { if(this.numCorrectInstances==0) return 0.0; return (this.totalCorrect/(double)this.numCorrectInstances); } public double meanIncorrect() { if(this.numIncorrectInstances==0) return 0.0; return (this.totalIncorrect/(double)this.numIncorrectInstances); } }
4,588
32.992593
227
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/RankMaxEnt.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; //package edu.umass.cs.mallet.users.culotta.cluster.classify; //import edu.umass.cs.mallet.base.classify.*; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.DenseVector; import cc.mallet.types.FeatureSelection; import cc.mallet.types.FeatureVector; import cc.mallet.types.FeatureVectorSequence; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.LabelAlphabet; import cc.mallet.types.LabelVector; import cc.mallet.types.MatrixOps; /** * Rank Maximum Entropy classifier. This classifier chooses among a set of * Instances with binary labels. Expects Instance data to be a * FeatureVectorSequence, and the target to be a String representation of the * index of the true best FeatureVectorSequence. Note that the Instance target * may be a Labels to indicate a tie for the best Instance. * * @author Aron Culotta <a * href="mailto:[email protected]">[email protected]</a> */ public class RankMaxEnt extends MaxEnt { // The default feature is always the feature with highest index public RankMaxEnt (Pipe dataPipe, double[] parameters, FeatureSelection featureSelection, FeatureSelection[] perClassFeatureSelection) { super (dataPipe, parameters, featureSelection, perClassFeatureSelection); } public RankMaxEnt (Pipe dataPipe, double[] parameters, FeatureSelection featureSelection) { this (dataPipe, parameters, featureSelection, null); } public RankMaxEnt (Pipe dataPipe, double[] parameters, FeatureSelection[] perClassFeatureSelection) { this (dataPipe, parameters, null, perClassFeatureSelection); } public RankMaxEnt (Pipe dataPipe, double[] parameters) { this (dataPipe, parameters, null, null); } /** returns unnormalized scores, corresponding to the score an * element of the InstanceList being the "top" instance * @param instance instance with data field a {@link InstanceList}. * @param scores has length = number of Instances in Instance.data, * which is of type InstanceList */ public void getUnnormalizedClassificationScores (Instance instance, double[] scores) { FeatureVectorSequence fvs = (FeatureVectorSequence)instance.getData(); assert (scores.length == fvs.size()); int numFeatures = instance.getDataAlphabet().size()+1; for (int instanceNumber=0; instanceNumber < fvs.size(); instanceNumber++) { FeatureVector fv = (FeatureVector)fvs.get(instanceNumber); // Make sure the feature vector's feature dictionary matches // what we are expecting from our data pipe (and thus our notion // of feature probabilities. assert (fv.getAlphabet () == this.instancePipe.getDataAlphabet ()); // Include the feature weights according to each label xxx is // this correct ? we only calculate the dot prod of the feature // vector with the "positiveLabel" weights // xxx include multiple labels scores[instanceNumber] = parameters[0*numFeatures + defaultFeatureIndex] + MatrixOps.rowDotProduct (parameters, numFeatures, 0, fv, defaultFeatureIndex, (perClassFeatureSelection == null ? featureSelection : perClassFeatureSelection[0])); } } public void getClassificationScores (Instance instance, double[] scores) { FeatureVectorSequence fvs = (FeatureVectorSequence)instance.getData(); int numFeatures = instance.getDataAlphabet().size()+1; int numLabels = fvs.size(); assert (scores.length == fvs.size()); for (int instanceNumber=0; instanceNumber < fvs.size(); instanceNumber++) { FeatureVector fv = (FeatureVector)fvs.get(instanceNumber); // Make sure the feature vector's feature dictionary matches // what we are expecting from our data pipe (and thus our notion // of feature probabilities. assert (fv.getAlphabet () == this.instancePipe.getDataAlphabet ()); // Include the feature weights according to each label scores[instanceNumber] = parameters[0*numFeatures + defaultFeatureIndex] + MatrixOps.rowDotProduct (parameters, numFeatures, 0, fv, defaultFeatureIndex, (perClassFeatureSelection == null ? featureSelection : perClassFeatureSelection[0])); } // Move scores to a range where exp() is accurate, and normalize double max = MatrixOps.max (scores); double sum = 0; for (int li = 0; li < numLabels; li++) sum += (scores[li] = Math.exp (scores[li] - max)); for (int li = 0; li < numLabels; li++) { scores[li] /= sum; // xxxNaN assert (!Double.isNaN(scores[li])); } } /** * Used by RankMaxEntTrainer to calculate the value when the labeling contains ties. Does not include scores of tied elements in normalization. * @param instance * @param scores * @param bestLabels Indices of Instances ties for 1st place. */ public void getClassificationScoresForTies (Instance instance, double[] scores, int[] bestLabels) { getClassificationScores(instance, scores); // Set all bestLabel probs to 0 except for first and renormalize for (int i = 1; i < bestLabels.length; i++) scores[bestLabels[i]] = 0.0; double sum = 0.0; for (int li = 0; li < scores.length; li++) sum += scores[li]; for (int li = 0; li < scores.length; li++) scores[li] /= sum; } public Classification classify (Instance instance) { FeatureVectorSequence fvs = (FeatureVectorSequence) instance.getData(); int numClasses = fvs.size(); double[] scores = new double[numClasses]; getClassificationScores (instance, scores); // Create and return a Classification object return new Classification (instance, this, createLabelVector (getLabelAlphabet(), scores)); } /** Constructs a LabelVector which is a distribution over indices of * the "positive" Instance. */ private LabelVector createLabelVector (LabelAlphabet labelAlphabet, double[] scores) { if (labelAlphabet.growthStopped()) labelAlphabet.startGrowth(); for (int i=0; i < scores.length; i++) labelAlphabet.lookupIndex(String.valueOf(i), true); double[] allScores = new double[labelAlphabet.size()]; for (int i=0; i < labelAlphabet.size(); i++) allScores[i] = 0.0; for (int i=0; i < scores.length; i++) { int index = labelAlphabet.lookupIndex(String.valueOf(i), true); allScores[index] = scores[i]; } return new LabelVector(labelAlphabet, allScores); } public void print () { final Alphabet dict = getAlphabet(); final LabelAlphabet labelDict = (LabelAlphabet)getLabelAlphabet(); int numFeatures = dict.size() + 1; int numLabels = labelDict.size(); // Include the feature weights according to each label //for (int li = 0; li < numLabels; li++) { System.out.println ("FEATURES FOR CLASS "+labelDict.lookupObject (0)); System.out.println (" <default> "+parameters [defaultFeatureIndex]); for (int i = 0; i < defaultFeatureIndex; i++) { Object name = dict.lookupObject (i); double weight = parameters [i]; System.out.println (" "+name+" "+weight); } } // SERIALIZATION private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject (ObjectOutputStream out) throws IOException { out.defaultWriteObject (); out.writeInt (CURRENT_SERIAL_VERSION); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject (); int version = in.readInt (); } }
8,341
35.269565
144
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/ClassifierAccuracyEvaluator.java
package cc.mallet.classify; import cc.mallet.types.InstanceList; public class ClassifierAccuracyEvaluator extends ClassifierEvaluator { public ClassifierAccuracyEvaluator (InstanceList[] instances, String[] descriptions) { super(instances,descriptions); } public ClassifierAccuracyEvaluator (InstanceList instanceList1, String instanceListDescription1) { this(new InstanceList[] {instanceList1}, new String[] {instanceListDescription1}); } public ClassifierAccuracyEvaluator (InstanceList instanceList1, String instanceListDescription1, InstanceList instanceList2, String instanceListDescription2) { this(new InstanceList[] {instanceList1, instanceList2}, new String[] {instanceListDescription1, instanceListDescription2}); } public ClassifierAccuracyEvaluator (InstanceList instanceList1, String instanceListDescription1, InstanceList instanceList2, String instanceListDescription2, InstanceList instanceList3, String instanceListDescription3) { this(new InstanceList[] {instanceList1, instanceList2, instanceList3}, new String[] {instanceListDescription1, instanceListDescription2, instanceListDescription3}); } public void evaluateInstanceList (ClassifierTrainer trainer, InstanceList instances, String description) { Classifier classifier = trainer.getClassifier(); if (classifier.getFeatureSelection() != instances.getFeatureSelection()) // TODO consider if we really want to do this... but note that the old MaxEnt did this to the testing the validation sets. //instances.setFeatureSelection(classifier.getFeatureSelection()); System.out.print (description+" accuracy=" + classifier.getAccuracy (instances)); } }
1,682
41.075
125
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/DecisionTreeTrainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.util.logging.*; import cc.mallet.classify.Classifier; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.FeatureSelection; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.LabelVector; import cc.mallet.types.Labeling; import cc.mallet.types.Multinomial; import cc.mallet.util.MalletLogger; /** A decision tree learner, roughly ID3, but only to a fixed given depth in all branches. Does not yet implement splitting of continuous-valued features, but it should in the future. Currently a feature is considered "present" if it has positive value. ftp://ftp.cs.cmu.edu/project/jair/volume4/quinlan96a.ps Only set up for conveniently learning decision stubs: there is no pruning or good stopping rule. Currently only stop by reaching a maximum depth. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class DecisionTreeTrainer extends ClassifierTrainer<DecisionTree> implements Boostable { private static Logger logger = MalletLogger.getLogger(DecisionTreeTrainer.class.getName()); public static final int DEFAULT_MAX_DEPTH = 5; public static final double DEFAULT_MIN_INFO_GAIN_SPLIT = 0.001; int maxDepth = DEFAULT_MAX_DEPTH; double minInfoGainSplit = 0.001; boolean finished = false; DecisionTree classifier = null; public DecisionTreeTrainer (int maxDepth) { this.maxDepth = maxDepth; } public DecisionTreeTrainer () { this(4); } public DecisionTreeTrainer setMaxDepth (int maxDepth) { this.maxDepth = maxDepth; return this; } public DecisionTreeTrainer setMinInfoGainSplit (double m) { this.minInfoGainSplit = m; return this; } public boolean isFinishedTraining() { return finished; } public DecisionTree getClassifier() { return classifier; } public DecisionTree train (InstanceList trainingList) { FeatureSelection selectedFeatures = trainingList.getFeatureSelection(); DecisionTree.Node root = new DecisionTree.Node (trainingList, null, selectedFeatures); splitTree (root, selectedFeatures, 0); root.stopGrowth(); finished = true; System.out.println ("DecisionTree learned:"); root.print(); this.classifier = new DecisionTree (trainingList.getPipe(), root); return classifier; } protected void splitTree (DecisionTree.Node node, FeatureSelection selectedFeatures, int depth) { if (depth == maxDepth || node.getSplitInfoGain() < minInfoGainSplit) return; logger.info("Splitting feature \""+node.getSplitFeature() +"\" infogain="+node.getSplitInfoGain()); node.split(selectedFeatures); splitTree (node.getFeaturePresentChild(), selectedFeatures, depth+1); splitTree (node.getFeatureAbsentChild(), selectedFeatures, depth+1); } public static abstract class Factory extends ClassifierTrainer.Factory<DecisionTreeTrainer> { protected static int maxDepth = DEFAULT_MAX_DEPTH; protected static double minInfoGainSplit = DEFAULT_MIN_INFO_GAIN_SPLIT; // This is recommended (but cannot be enforced in Java) that subclasses implement // public static Classifier train (InstanceList trainingSet) // public static Classifier train (InstanceList trainingSet, InstanceList validationSet) // public static Classifier train (InstanceList trainingSet, InstanceList validationSet, Classifier initialClassifier) // which call public DecisionTreeTrainer newClassifierTrainer (Classifier initialClassifier) { DecisionTreeTrainer t = new DecisionTreeTrainer (); t.maxDepth = this.maxDepth; t.minInfoGainSplit = this.minInfoGainSplit; return t; } } /* public static void main () { DecisionTreeTrainer.Factory dtf = new DecisionTreeTrainer.Factory() {{ maxDepth = 6; }}; DecisionTreeTrainer.Factory dtf = new DecisionTreeTrainer.Factory().setMaxDepth(6).setMinInfoGainSplit(.2); } */ }
4,348
37.486726
120
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/AdaBoostM2Trainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.util.Random; import java.util.Arrays; import java.util.logging.*; import cc.mallet.types.*; import cc.mallet.util.MalletLogger; import cc.mallet.util.Maths; /** * This version of AdaBoost can handle multi-class problems. For * binary classification, can also use <tt>AdaBoostTrainer</tt>. * * <p>Yoav Freund and Robert E. Schapire * "Experiments with a New Boosting Algorithm" * In Journal of Machine Learning: Proceedings of the 13th International Conference, 1996 * http://www.cs.princeton.edu/~schapire/papers/FreundSc96b.ps.Z * * @author Gary Huang <a href="mailto:[email protected]">[email protected]</a> */ public class AdaBoostM2Trainer extends ClassifierTrainer<AdaBoostM2> { private static Logger logger = MalletLogger.getLogger(AdaBoostM2Trainer.class.getName()); private static int MAX_NUM_RESAMPLING_ITERATIONS = 10; ClassifierTrainer weakLearner; int numRounds; AdaBoostM2 classifier; public AdaBoostM2 getClassifier () { return classifier; } public AdaBoostM2Trainer (ClassifierTrainer weakLearner, int numRounds) { if (! (weakLearner instanceof Boostable)) throw new IllegalArgumentException ("weak learner not boostable"); if (numRounds <= 0) throw new IllegalArgumentException ("number of rounds must be positive"); this.weakLearner = weakLearner; this.numRounds = numRounds; } public AdaBoostM2Trainer (ClassifierTrainer weakLearner) { this (weakLearner, 100); } /** * Boosting method that resamples instances using their weights */ public AdaBoostM2 train (InstanceList trainingList) { FeatureSelection selectedFeatures = trainingList.getFeatureSelection(); if (selectedFeatures != null) throw new UnsupportedOperationException("FeatureSelection not yet implemented."); int numClasses = trainingList.getTargetAlphabet().size(); int numInstances = trainingList.size(); // Construct the set "B", a list of instances of size // (numInstances * (numClasses - 1)). // Each instance in this list will have weights // (mislabel distribution) associated with classes // the intance doesn't belong to. InstanceList trainingInsts = new InstanceList(trainingList.getPipe()); // Set the initial weights to be uniform double[] weights = new double[numInstances * (numClasses - 1)]; double w = 1.0 / weights.length; Arrays.fill(weights, w); int[] classIndices = new int[weights.length]; int numAdded = 0; for (int i = 0; i < numInstances; i++) { Instance inst = trainingList.get(i); int trueClassIndex = inst.getLabeling().getBestIndex(); for (int j = 0; j < numClasses; j++) { if (j != trueClassIndex) { trainingInsts.add(inst, 1); classIndices[numAdded] = j; numAdded++; } } } java.util.Random random = new java.util.Random(); Classifier[] weakLearners = new Classifier[numRounds]; double[] classifierWeights = new double[numRounds]; double[] exponents = new double[weights.length]; int[] instIndices = new int[weights.length]; for (int i = 0; i < instIndices.length; i++) instIndices[i] = i; // Boosting iterations for (int round = 0; round < numRounds; round++) { logger.info("=========== AdaBoostM2Trainer round " + (round+1) + " begin"); // Sample instances from set B using the // weight vector to train the weak learner double epsilon; InstanceList roundTrainingInsts = new InstanceList(trainingInsts.getPipe()); int resamplingIterations = 0; do { epsilon = 0; int[] sampleIndices = sampleWithWeights(instIndices, weights, random); roundTrainingInsts = new InstanceList(trainingInsts.getPipe(), sampleIndices.length); for (int i = 0; i < sampleIndices.length; i++) { Instance inst = trainingInsts.get(sampleIndices[i]); roundTrainingInsts.add(inst, 1); } weakLearners[round] = weakLearner.train(roundTrainingInsts); // Calculate the pseudo-loss of weak learner for (int i = 0; i < trainingInsts.size(); i++) { Instance inst = trainingInsts.get(i); Classification c = weakLearners[round].classify(inst); double htCorrect = c.valueOfCorrectLabel(); double htWrong = c.getLabeling().value(classIndices[i]); epsilon += weights[i] * (1 - htCorrect + htWrong); exponents[i] = 1 + htCorrect - htWrong; } epsilon *= 0.5; resamplingIterations++; } while (Maths.almostEquals(epsilon, 0) && resamplingIterations < MAX_NUM_RESAMPLING_ITERATIONS); // Stop boosting when pseudo-loss is 0, ignoring // weak classifier trained this round if (Maths.almostEquals(epsilon, 0)) { logger.info("AdaBoostM2Trainer stopped at " + (round+1) + " / " + numRounds + " pseudo-loss=" + epsilon); // If we are in the first round, have to use the weak classifier in any case int numClassifiersToUse = (round == 0) ? 1 : round; if (round == 0) classifierWeights[0] = 1; double[] classifierWeights2 = new double[numClassifiersToUse]; Classifier[] weakLearners2 = new Classifier[numClassifiersToUse]; System.arraycopy(classifierWeights, 0, classifierWeights2, 0, numClassifiersToUse); System.arraycopy(weakLearners, 0, weakLearners2, 0, numClassifiersToUse); for (int i = 0; i < classifierWeights2.length; i++) { logger.info("AdaBoostM2Trainer weight[weakLearner[" + i + "]]=" + classifierWeights2[i]); } return new AdaBoostM2 (trainingInsts.getPipe(), weakLearners2, classifierWeights2); } double beta = epsilon / (1 - epsilon); classifierWeights[round] = Math.log(1.0 / beta); // Update and normalize weights double sum = 0; for (int i = 0; i < weights.length; i++) { weights[i] *= Math.pow(beta, 0.5 * exponents[i]); sum += weights[i]; } MatrixOps.timesEquals(weights, 1.0 / sum); logger.info("=========== AdaBoostM2Trainer round " + (round+1) + " finished, pseudo-loss = " + epsilon); } for (int i = 0; i < classifierWeights.length; i++) logger.info("AdaBoostM2Trainer weight[weakLearner[" + i + "]]=" + classifierWeights[i]); this.classifier = new AdaBoostM2 (trainingInsts.getPipe(), weakLearners, classifierWeights); return classifier; } // returns an array of ints of the same size as data, // where the the samples are randomly chosen from data // using the distribution of the weights vector private int[] sampleWithWeights(int[] data, double[] weights, java.util.Random random) { if (weights.length != data.length) throw new IllegalArgumentException("length of weight vector must equal number of data points"); double sumOfWeights = 0; for (int i = 0; i < data.length; i++) { if (weights[i] < 0) throw new IllegalArgumentException("weight vector must be non-negative"); sumOfWeights += weights[i]; } if (sumOfWeights <= 0) throw new IllegalArgumentException("weights must sum to positive value"); int[] sample = new int[data.length]; double[] probabilities = new double[data.length]; double sumProbs = 0; for (int i = 0; i < data.length; i++) { sumProbs += random.nextDouble(); probabilities[i] = sumProbs; } MatrixOps.timesEquals(probabilities, sumOfWeights / sumProbs); // make sure rounding didn't mess things up probabilities[data.length - 1] = sumOfWeights; // do sampling int a = 0; int b = 0; sumProbs = 0; while (a < data.length && b < data.length) { sumProbs += weights[b]; while (a < data.length && probabilities[a] <= sumProbs) { sample[a] = data[b]; a++; } b++; } return sample; } }
7,963
38.621891
107
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/AdaBoost.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import cc.mallet.pipe.*; import cc.mallet.types.*; /** AdaBoost Robert E. Schapire. "The boosting approach to machine learning: An overview." In MSRI Workshop on Nonlinear Estimation and Classification, 2002. http://www.research.att.com/~schapire/cgi-bin/uncompress-papers/msri.ps @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class AdaBoost extends Classifier { Classifier[] weakClassifiers; double[] alphas; public AdaBoost (Pipe instancePipe, Classifier[] weakClassifiers, double[] alphas) { super (instancePipe); this.weakClassifiers = weakClassifiers; this.alphas = alphas; } // added by Gary /** * Get the number of weak classifiers in this ensemble classifier */ public int getNumWeakClassifiers() { return alphas.length; } // added by Gary /** * Return an AdaBoost classifier that uses only the first * <tt>numWeakClassifiersToUse</tt> weak learners. * * <p>The returned classifier's Pipe and weak classifiers * are backed by the respective objects of this classifier, * so changes to the returned classifier's Pipe and weak * classifiers are reflected in this classifier, and vice versa. */ public AdaBoost getTrimmedClassifier(int numWeakClassifiersToUse) { if (numWeakClassifiersToUse <= 0 || numWeakClassifiersToUse > weakClassifiers.length) throw new IllegalArgumentException("number of weak learners to use out of range:" + numWeakClassifiersToUse); Classifier[] newWeakClassifiers = new Classifier[numWeakClassifiersToUse]; System.arraycopy(weakClassifiers, 0, newWeakClassifiers, 0, numWeakClassifiersToUse); double[] newAlphas = new double[numWeakClassifiersToUse]; System.arraycopy(alphas, 0, newAlphas, 0, numWeakClassifiersToUse); return new AdaBoost(instancePipe, newWeakClassifiers, newAlphas); } public Classification classify (Instance inst) { return classify(inst, weakClassifiers.length); } /** * Classify the given instance using only the first * <tt>numWeakClassifiersToUse</tt> classifiers * trained during boosting */ public Classification classify (Instance inst, int numWeakClassifiersToUse) { if (numWeakClassifiersToUse <= 0 || numWeakClassifiersToUse > weakClassifiers.length) throw new IllegalArgumentException("number of weak learners to use out of range:" + numWeakClassifiersToUse); FeatureVector fv = (FeatureVector) inst.getData(); assert (instancePipe == null || fv.getAlphabet () == this.instancePipe.getDataAlphabet ()); int numClasses = getLabelAlphabet().size(); double[] scores = new double[numClasses]; int bestIndex; double sum = 0; // Gather scores of all weakClassifiers for (int round = 0; round < numWeakClassifiersToUse; round++) { bestIndex = weakClassifiers[round].classify(inst).getLabeling().getBestIndex(); scores[bestIndex] += alphas[round]; sum += scores[bestIndex]; } // Normalize the scores for (int i = 0; i < scores.length; i++) scores[i] /= sum; return new Classification (inst, this, new LabelVector (getLabelAlphabet(), scores)); } }
3,814
35.333333
96
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/NaiveBayesTrainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.io.Serializable; import java.io.ObjectOutputStream; import java.io.IOException; import java.io.ObjectInputStream; import java.util.Iterator; import cc.mallet.classify.Classifier; import cc.mallet.pipe.Noop; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.AlphabetCarrying; import cc.mallet.types.FeatureSelection; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.LabelVector; import cc.mallet.types.Labeling; import cc.mallet.types.Multinomial; /** * Class used to generate a NaiveBayes classifier from a set of training data. * In an Bayes classifier, * the p(Classification|Data) = p(Data|Classification)p(Classification)/p(Data) * <p> * To compute the likelihood: <br> * p(Data|Classification) = p(d1,d2,..dn | Classification) <br> * Naive Bayes makes the assumption that all of the data are conditionally * independent given the Classification: <br> * p(d1,d2,...dn | Classification) = p(d1|Classification)p(d2|Classification).. * <p> * As with other classifiers in Mallet, NaiveBayes is implemented as two classes: * a trainer and a classifier. The NaiveBayesTrainer produces estimates of the various * p(dn|Classifier) and contructs this class with those estimates. * <p> * A call to train() or incrementalTrain() produces a * {@link cc.mallet.classify.NaiveBayes} classifier that can * can be used to classify instances. A call to incrementalTrain() does not throw * away the internal state of the trainer; subsequent calls to incrementalTrain() * train by extending the previous training set. * <p> * A NaiveBayesTrainer can be persisted using serialization. * @see NaiveBayes * @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> * */ public class NaiveBayesTrainer extends ClassifierTrainer<NaiveBayes> implements ClassifierTrainer.ByInstanceIncrements<NaiveBayes>, Boostable, AlphabetCarrying, Serializable { // These function as default selections for the kind of Estimator used Multinomial.Estimator featureEstimator = new Multinomial.LaplaceEstimator(); Multinomial.Estimator priorEstimator = new Multinomial.LaplaceEstimator(); // Added to support incremental training. // These are the counts formed after NaiveBayes training. Note that // these are *not* the estimates passed to the NaiveBayes classifier; // rather the estimates are formed from these counts. // we could break these five fields out into a inner class. Multinomial.Estimator[] me; Multinomial.Estimator pe; double docLengthNormalization = -1; // A value of -1 means don't do any document length normalization NaiveBayes classifier; // If this style of incremental training is successful, the following members // should probably be moved up into IncrementalClassifierTrainer Pipe instancePipe; // Needed to construct a new classifier Alphabet dataAlphabet; // Extracted from InstanceList. Must be the same for all calls to incrementalTrain() Alphabet targetAlphabet; // Extracted from InstanceList. Must be the same for all calls to incrementalTrain public NaiveBayesTrainer (NaiveBayes initialClassifier) { if (initialClassifier != null) { this.instancePipe = initialClassifier.getInstancePipe(); this.dataAlphabet = initialClassifier.getAlphabet(); this.targetAlphabet = initialClassifier.getLabelAlphabet(); this.classifier = initialClassifier; } } public NaiveBayesTrainer (Pipe instancePipe) { this.instancePipe = instancePipe; this.dataAlphabet = instancePipe.getDataAlphabet(); this.targetAlphabet = instancePipe.getTargetAlphabet(); } public NaiveBayesTrainer () { } public NaiveBayes getClassifier () { return classifier; } public NaiveBayesTrainer setDocLengthNormalization (double d) { docLengthNormalization = d; return this; } public double getDocLengthNormalization () { return docLengthNormalization; } /** * Get the MultinomialEstimator instance used to specify the type of estimator * for features. * * @return estimator to be cloned on next call to train() or first call * to incrementalTrain() */ public Multinomial.Estimator getFeatureMultinomialEstimator () { return featureEstimator; } /** * Set the Multinomial Estimator used for features. The MulitnomialEstimator * is internally cloned and the clone is used to maintain the counts * that will be used to generate probability estimates * the next time train() or an initial incrementalTrain() is run. * Defaults to a Multinomial.LaplaceEstimator() * @param me to be cloned on next call to train() or first call * to incrementalTrain() */ public NaiveBayesTrainer setFeatureMultinomialEstimator (Multinomial.Estimator me) { if (instancePipe != null) throw new IllegalStateException("Can't set after incrementalTrain() is called"); featureEstimator = me; return this; } /** * Get the MultinomialEstimator instance used to specify the type of estimator * for priors. * * @return estimator to be cloned on next call to train() or first call * to incrementalTrain() */ public Multinomial.Estimator getPriorMultinomialEstimator () { return priorEstimator; } /** * Set the Multinomial Estimator used for priors. The MulitnomialEstimator * is internally cloned and the clone is used to maintain the counts * that will be used to generate probability estimates * the next time train() or an initial incrementalTrain() is run. * Defaults to a Multinomial.LaplaceEstimator() * @param me to be cloned on next call to train() or first call * to incrementalTrain() */ public NaiveBayesTrainer setPriorMultinomialEstimator (Multinomial.Estimator me) { if (instancePipe != null) throw new IllegalStateException("Can't set after incrementalTrain() is called"); priorEstimator = me; return this; } /** * Create a NaiveBayes classifier from a set of training data. * The trainer uses counts of each feature in an instance's feature vector * to provide an estimate of p(Labeling| feature). The internal state * of the trainer is thrown away ( by a call to reset() ) when train() returns. Each * call to train() is completely independent of any other. * @param trainingList The InstanceList to be used to train the classifier. * Within each instance the data slot is an instance of FeatureVector and the * target slot is an instance of Labeling * @param validationList Currently unused * @param testSet Currently unused * @param evaluator Currently unused * @param initialClassifier Currently unused * @return The NaiveBayes classifier as trained on the trainingList */ public NaiveBayes train (InstanceList trainingList) { // Forget all the previous sufficient statistics counts; me = null; pe = null; // Train a new classifier based on this data this.classifier = trainIncremental (trainingList); return classifier; } public NaiveBayes trainIncremental (InstanceList trainingInstancesToAdd) { // Initialize and check instance variables as necessary... setup(trainingInstancesToAdd, null); // Incrementally add the counts of this new training data for (Instance instance : trainingInstancesToAdd) incorporateOneInstance(instance, trainingInstancesToAdd.getInstanceWeight(instance)); // Estimate multinomials, and return a new naive Bayes classifier. // Note that, unlike MaxEnt, NaiveBayes is immutable, so we create a new one each time. classifier = new NaiveBayes (instancePipe, pe.estimate(), estimateFeatureMultinomials()); return classifier; } public NaiveBayes trainIncremental (Instance instance) { setup (null, instance); // Incrementally add the counts of this new training instance incorporateOneInstance (instance, 1.0); if (instancePipe == null) instancePipe = new Noop (dataAlphabet, targetAlphabet); classifier = new NaiveBayes (instancePipe, pe.estimate(), estimateFeatureMultinomials()); return classifier; } private void setup (InstanceList instances, Instance instance) { assert (instances != null || instance != null); if (instance == null && instances != null) instance = instances.get(0); // Initialize the alphabets if (dataAlphabet == null) { this.dataAlphabet = instance.getDataAlphabet(); this.targetAlphabet = instance.getTargetAlphabet(); } else if (!Alphabet.alphabetsMatch(instance, this)) // Make sure the alphabets match throw new IllegalArgumentException ("Training set alphabets do not match those of NaiveBayesTrainer."); // Initialize or check the instancePipe if (instances != null) { if (instancePipe == null) instancePipe = instances.getPipe(); else if (instancePipe != instances.getPipe()) // Make sure that this pipes match. Is this really necessary?? // I don't think so, but it could be confusing to have each returned classifier have a different pipe? -akm 1/08 throw new IllegalArgumentException ("Training set pipe does not match that of NaiveBayesTrainer."); } if (me == null) { int numLabels = targetAlphabet.size(); me = new Multinomial.Estimator[numLabels]; for (int i = 0; i < numLabels; i++) { me[i] = (Multinomial.Estimator) featureEstimator.clone(); me[i].setAlphabet(dataAlphabet); } pe = (Multinomial.Estimator) priorEstimator.clone(); } if (targetAlphabet.size() > me.length) { // target alphabet grew. increase size of our multinomial array int targetAlphabetSize = targetAlphabet.size(); // copy over old values Multinomial.Estimator[] newMe = new Multinomial.Estimator[targetAlphabetSize]; System.arraycopy (me, 0, newMe, 0, me.length); // initialize new expanded space for (int i= me.length; i<targetAlphabetSize; i++){ Multinomial.Estimator mest = (Multinomial.Estimator)featureEstimator.clone (); mest.setAlphabet (dataAlphabet); newMe[i] = mest; } me = newMe; } } private void incorporateOneInstance (Instance instance, double instanceWeight) { Labeling labeling = instance.getLabeling (); if (labeling == null) return; // Handle unlabeled instances by skipping them FeatureVector fv = (FeatureVector) instance.getData (); double oneNorm = fv.oneNorm(); if (oneNorm <= 0) return; // Skip instances that have no features present if (docLengthNormalization > 0) // Make the document have counts that sum to docLengthNormalization // I.e., if 20, it would be as if the document had 20 words. instanceWeight *= docLengthNormalization / oneNorm; assert (instanceWeight > 0 && !Double.isInfinite(instanceWeight)); for (int lpos = 0; lpos < labeling.numLocations(); lpos++) { int li = labeling.indexAtLocation (lpos); double labelWeight = labeling.valueAtLocation (lpos); if (labelWeight == 0) continue; //System.out.println ("NaiveBayesTrainer me.increment "+ labelWeight * instanceWeight); me[li].increment (fv, labelWeight * instanceWeight); // This relies on labelWeight summing to 1 over all labels pe.increment (li, labelWeight * instanceWeight); } } private Multinomial[] estimateFeatureMultinomials () { int numLabels = targetAlphabet.size(); Multinomial[] m = new Multinomial[numLabels]; for (int li = 0; li < numLabels; li++) { //me[li].print (); // debugging m[li] = me[li].estimate(); } return m; } /** * Create a NaiveBayes classifier from a set of training data and the * previous state of the trainer. Subsequent calls to incrementalTrain() * add to the state of the trainer. An incremental training session * should consist only of calls to incrementalTrain() and have no * calls to train(); * * @param trainingList The InstanceList to be used to train the classifier. * Within each instance the data slot is an instance of FeatureVector and the * target slot is an instance of Labeling * @param validationList Currently unused * @param testSet Currently unused * @param evaluator Currently unused * @param initialClassifier Currently unused * @return The NaiveBayes classifier as trained on the trainingList and the previous * trainingLists passed to incrementalTrain() */ public String toString() { return "NaiveBayesTrainer"; } // AlphabetCarrying interface public boolean alphabetsMatch(AlphabetCarrying object) { return Alphabet.alphabetsMatch (this, object); } public Alphabet getAlphabet() { return dataAlphabet; } public Alphabet[] getAlphabets() { return new Alphabet[] { dataAlphabet, targetAlphabet }; } // Serialization // serialVersionUID is overriden to prevent innocuous changes in this // class from making the serialization mechanism think the external // format has changed. private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject(ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); //default selections for the kind of Estimator used out.writeObject(featureEstimator); out.writeObject(priorEstimator); // These are the counts formed after NaiveBayes training. out.writeObject(me); out.writeObject(pe); // pipe and alphabets out.writeObject(instancePipe); out.writeObject(dataAlphabet); out.writeObject(targetAlphabet); } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt(); if (version != CURRENT_SERIAL_VERSION) throw new ClassNotFoundException("Mismatched NaiveBayesTrainer versions: wanted " + CURRENT_SERIAL_VERSION + ", got " + version); //default selections for the kind of Estimator used featureEstimator = (Multinomial.Estimator) in.readObject(); priorEstimator = (Multinomial.Estimator) in.readObject(); // These are the counts formed after NaiveBayes training. me = (Multinomial.Estimator []) in.readObject(); pe = (Multinomial.Estimator) in.readObject(); // pipe and alphabets instancePipe = (Pipe) in.readObject(); dataAlphabet = (Alphabet) in.readObject(); targetAlphabet = (Alphabet) in.readObject(); } public static class Factory extends ClassifierTrainer.Factory<NaiveBayesTrainer> { Multinomial.Estimator featureEstimator = new Multinomial.LaplaceEstimator(); Multinomial.Estimator priorEstimator = new Multinomial.LaplaceEstimator(); double docLengthNormalization = -1; public NaiveBayesTrainer newClassifierTrainer(Classifier initialClassifier) { return new NaiveBayesTrainer ((NaiveBayes)initialClassifier); } public NaiveBayesTrainer.Factory setDocLengthNormalization (double docLengthNormalization) { this.docLengthNormalization = docLengthNormalization; return this; } public NaiveBayesTrainer.Factory setFeatureMultinomialEstimator (Multinomial.Estimator featureEstimator) { this.featureEstimator = featureEstimator; return this; } public NaiveBayesTrainer.Factory setPriorMultinomialEstimator (Multinomial.Estimator priorEstimator) { this.priorEstimator = priorEstimator; return this; } } }
16,160
37.848558
118
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/Classifier.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify; import java.util.ArrayList; import java.util.List; import java.util.logging.*; import java.io.PrintWriter; import java.io.Serializable; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.AlphabetCarrying; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.Label; import cc.mallet.types.LabelAlphabet; import cc.mallet.types.Labeling; import cc.mallet.types.FeatureSelection; /** * Abstract parent of all Classifiers. * <p> * All classification techniques in MALLET are implemented as two classes: * a trainer and a classifier. The trainer injests the training data * and creates a classifier that holds the parameters set during training. * The classifier applies those parameters to an Instance to produce * a classification of the Instance. * <p> * A concrete classifier is required only to be able to classify an instance. * <p> * Methods for classifying an InstanceList are here. There are * also methods for calculating precison, recall, and f1 from either * InstanceLists (which are classified first) or an ArrayList of * classifications. Similar functionality is also in * {@link cc.mallet.classify.Trial} * * <p> A classifier holds a reference to the pipe that was used to * create the Instances being classified. Most classifiers use * this to make sure the Alphabets of the instances being classified * are the same Alphabet objects used during training. * <p> * Alphabets are allowed to between training and classification. * @see ClassifierTrainer * @see Instance * @see InstanceList * @see Classification * @see Trial */ public abstract class Classifier implements AlphabetCarrying, Serializable { private static Logger logger = Logger.getLogger(Classifier.class.getName()); protected Pipe instancePipe; /** For serialization only. */ protected Classifier() { } public Classifier (Pipe instancePipe) { this.instancePipe = instancePipe; // All classifiers must have set of labels. assert (instancePipe.getTargetAlphabet() != null); assert (instancePipe.getTargetAlphabet().getClass().isAssignableFrom(LabelAlphabet.class)); // Not all classifiers require a feature dictionary, however. } // TODO Change this method name to getPipe(); public Pipe getInstancePipe () { return instancePipe; } public Alphabet getAlphabet () { return (Alphabet) instancePipe.getDataAlphabet(); } public LabelAlphabet getLabelAlphabet () { return (LabelAlphabet) instancePipe.getTargetAlphabet(); } public Alphabet[] getAlphabets() { return new Alphabet[] {getAlphabet(), getLabelAlphabet()}; } public boolean alphabetsMatch (AlphabetCarrying object) { Alphabet[] otherAlphabets = object.getAlphabets(); if (otherAlphabets.length == 2 && otherAlphabets[0] == getAlphabet() && otherAlphabets[1] == getLabelAlphabet()) return true; return false; } // TODO Make argument List<Instance> public ArrayList<Classification> classify (InstanceList instances) { ArrayList<Classification> ret = new ArrayList<Classification> (instances.size()); for (Instance inst : instances) ret.add (classify (inst)); return ret; } public Classification[] classify (Instance[] instances) { Classification[] ret = new Classification[instances.length]; for (int i = 0; i < instances.length; i++) ret[i] = classify (instances[i]); return ret; } public abstract Classification classify (Instance instance); /** Pipe the object through this classifier's pipe, then classify the resulting instance. */ public Classification classify (Object obj) { if (obj instanceof Instance) return classify ((Instance)obj); return classify (instancePipe.instanceFrom(new Instance (obj, null, null, null))); } public FeatureSelection getFeatureSelection () { return null; } public FeatureSelection[] getPerClassFeatureSelection () { return null; } // Various evaluation methods public double getAccuracy (InstanceList ilist) { return new Trial(this, ilist).getAccuracy(); } public double getPrecision (InstanceList ilist, int index) { return new Trial(this, ilist).getPrecision(index); } public double getPrecision (InstanceList ilist, Labeling labeling) { return new Trial(this, ilist).getPrecision(labeling); } public double getPrecision (InstanceList ilist, Object labelEntry) { return new Trial(this, ilist).getPrecision(labelEntry); } public double getRecall (InstanceList ilist, int index) { return new Trial(this, ilist).getRecall(index); } public double getRecall (InstanceList ilist, Labeling labeling) { return new Trial(this, ilist).getRecall(labeling); } public double getRecall (InstanceList ilist, Object labelEntry) { return new Trial(this, ilist).getRecall(labelEntry); } public double getF1 (InstanceList ilist, int index) { return new Trial(this, ilist).getF1(index); } public double getF1 (InstanceList ilist, Labeling labeling) { return new Trial(this, ilist).getF1(labeling); } public double getF1 (InstanceList ilist, Object labelEntry) { return new Trial(this, ilist).getF1(labelEntry); } public double getAverageRank (InstanceList ilist) { return new Trial(this, ilist).getAverageRank(); } /** * Outputs human-readable description of classifier (e.g., list of weights, decision tree) * to System.out */ public void print () { System.out.println ("Classifier "+getClass().getName()+"\n Detailed printout not yet implemented."); } public void print (PrintWriter out) { out.println ("Classifier "+getClass().getName()+"\n Detailed printout not yet implemented."); } }
6,183
34.54023
127
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/Trial.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.logging.Logger; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.Label; import cc.mallet.types.Labeling; /** * Stores the results of classifying a collection of Instances, * and provides many methods for evaluating the results. * * If you just need one evaluation result, you may find it easier to one * of the corresponding methods in Classifier, which simply call the methods here. * * @see InstanceList * @see Classifier * @see Classification * * @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class Trial extends ArrayList<Classification> { private static Logger logger = Logger.getLogger(Trial.class.getName()); Classifier classifier; public Trial (Classifier c, InstanceList ilist) { super (ilist.size()); this.classifier = c; for (Instance instance : ilist) this.add (c.classify (instance)); } public boolean add (Classification c) { if (c.getClassifier() != this.classifier) throw new IllegalArgumentException ("Trying to add Classification from a different Classifier."); return super.add (c); } public void add (int index, Classification c) { if (c.getClassifier() != this.classifier) throw new IllegalArgumentException ("Trying to add Classification from a different Classifier."); super.add (index, c); } public boolean addAll(Collection<? extends Classification> collection) { boolean ret = true; for (Classification c : collection) if (!this.add(c)) ret = false; return ret; } public boolean addAll (int index, Collection<? extends Classification> collection) { throw new IllegalStateException ("Not implemented."); } public Classifier getClassifier () { return classifier; } /** Return the fraction of instances that have the correct label as their best predicted label. */ public double getAccuracy () { int numCorrect = 0; for (int i = 0; i < this.size(); i++) if (this.get(i).bestLabelIsCorrect()) numCorrect++; return (double)numCorrect/this.size(); } /** Calculate the precision of the classifier on an instance list for a particular target entry */ public double getPrecision (Object labelEntry) { int index; if (labelEntry instanceof Labeling) index = ((Labeling)labelEntry).getBestIndex(); else index = classifier.getLabelAlphabet().lookupIndex(labelEntry, false); if (index == -1) throw new IllegalArgumentException ("Label "+labelEntry.toString()+" is not a valid label."); return getPrecision (index); } public double getPrecision (Labeling label) { return getPrecision (label.getBestIndex()); } /** Calculate the precision for a particular target index from an array list of classifications */ public double getPrecision (int index) { int numCorrect = 0; int numInstances = 0; int trueLabel, classLabel; for (int i = 0; i<this.size(); i++) { trueLabel = this.get(i).getInstance().getLabeling().getBestIndex(); classLabel = this.get(i).getLabeling().getBestIndex(); if (classLabel == index) { numInstances++; if (trueLabel == index) numCorrect++; } } // [email protected] // When no examples are predicted to have this label, // we define precision to be 1. if (numInstances==0) { logger.warning("No examples with predicted label " + classifier.getLabelAlphabet().lookupLabel(index) + "!"); assert(numCorrect == 0); return 1; } return ((double)numCorrect/(double)numInstances); } /** Calculate the recall of the classifier on an instance list for a particular target entry */ public double getRecall (Object labelEntry) { int index; if (labelEntry instanceof Labeling) index = ((Labeling)labelEntry).getBestIndex(); else index = classifier.getLabelAlphabet().lookupIndex(labelEntry, false); if (index == -1) throw new IllegalArgumentException ("Label "+labelEntry.toString()+" is not a valid label."); return getRecall (index); } public double getRecall (Labeling label) { return getRecall (label.getBestIndex()); } /** Calculate the recall for a particular target index from an array list of classifications */ public double getRecall (int labelIndex) { int numCorrect = 0; int numInstances = 0; int trueLabel, classLabel; for (int i = 0; i<this.size(); i++) { trueLabel = this.get(i).getInstance().getLabeling().getBestIndex(); classLabel = this.get(i).getLabeling().getBestIndex(); if ( trueLabel == labelIndex ) { numInstances++; if ( classLabel == labelIndex) numCorrect++; } } // [email protected] // When no examples have this label, // we define recall to be 1. if (numInstances==0) { logger.warning("No examples with true label " + classifier.getLabelAlphabet().lookupLabel(labelIndex) + "!"); assert(numCorrect == 0); return 1; } return ((double)numCorrect/(double)numInstances); } /** Calculate the F1-measure of the classifier on an instance list for a particular target entry */ public double getF1 (Object labelEntry) { int index; if (labelEntry instanceof Labeling) index = ((Labeling)labelEntry).getBestIndex(); else index = classifier.getLabelAlphabet().lookupIndex(labelEntry, false); if (index == -1) throw new IllegalArgumentException ("Label "+labelEntry.toString()+" is not a valid label."); return getF1 (index); } public double getF1 (Labeling label) { return getF1 (label.getBestIndex()); } /** Calculate the F1-measure for a particular target index from an array list of classifications */ public double getF1 (int index) { double precision = getPrecision (index); double recall = getRecall (index); // [email protected] // When both precision and recall are 0, F1 is 0. if (precision==0.0 && recall==0.0) { return 0; } return 2*precision*recall/(precision+recall); } /** Return the average rank of the correct class label as returned by Labeling.getRank(correctLabel) on the predicted Labeling. */ public double getAverageRank () { double rsum = 0; Labeling tmpL; Classification tmpC; Instance tmpI; Label tmpLbl, tmpLbl2; int tmpInt; for(int i = 0; i < this.size(); i++) { tmpC = this.get(i); tmpI = tmpC.getInstance(); tmpL = tmpC.getLabeling(); tmpLbl = (Label)tmpI.getTarget(); tmpInt = tmpL.getRank(tmpLbl); tmpLbl2 = tmpL.getLabelAtRank(0); rsum = rsum + tmpInt; } return rsum/this.size(); } }
7,147
27.706827
131
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/FeatureConstraintUtil.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.logging.Logger; import cc.mallet.fst.TokenAccuracyEvaluator; import cc.mallet.topics.LDAHyper; import cc.mallet.topics.ParallelTopicModel; import cc.mallet.types.Alphabet; import cc.mallet.types.FeatureVector; import cc.mallet.types.IDSorter; import cc.mallet.types.InfoGain; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.Labeling; import cc.mallet.types.MatrixOps; import cc.mallet.util.MalletLogger; import cc.mallet.util.Maths; /** * Utility functions for creating feature constraints that can be used with GE training. * @author Gregory Druck <a href="mailto:[email protected]">[email protected]</a> */ public class FeatureConstraintUtil { private static Logger logger = MalletLogger.getLogger(FeatureConstraintUtil.class.getName()); /** * Reads feature constraints from a file, whether they are stored * using Strings or indices. * * @param filename File with feature constraints. * @param data InstanceList used for alphabets. * @return Constraints. */ public static HashMap<Integer,double[]> readConstraintsFromFile(String filename, InstanceList data) { if (testConstraintsFileIndexBased(filename)) { return readConstraintsFromFileIndex(filename,data); } return readConstraintsFromFileString(filename,data); } /** * Reads feature constraints stored using strings from a file. * * feature_name (label_name:probability)+ * * Labels that do appear get probability 0. * * @param filename File with feature constraints. * @param data InstanceList used for alphabets. * @return Constraints. */ public static HashMap<Integer,double[]> readConstraintsFromFileString(String filename, InstanceList data) { HashMap<Integer,double[]> constraints = new HashMap<Integer,double[]>(); File file = new File(filename); try { BufferedReader reader = new BufferedReader(new FileReader(file)); String line = reader.readLine(); while (line != null) { String[] split = line.split("\\s+"); // assume the feature name has no spaces String featureName = split[0]; int featureIndex = data.getDataAlphabet().lookupIndex(featureName,false); assert(split.length - 1 == data.getTargetAlphabet().size()); double[] probs = new double[split.length - 1]; for (int index = 1; index < split.length; index++) { String[] labelSplit = split[index].split(":"); int li = data.getTargetAlphabet().lookupIndex(labelSplit[0],false); double prob = Double.parseDouble(labelSplit[1]); probs[li] = prob; } constraints.put(featureIndex, probs); line = reader.readLine(); } } catch (Exception e) { e.printStackTrace(); System.exit(1); } return constraints; } /** * Reads feature constraints stored using strings from a file. * * feature_index label_0_prob label_1_prob ... label_n_prob * * Here each label must appear. * * @param filename File with feature constraints. * @param data InstanceList used for alphabets. * @return Constraints. */ public static HashMap<Integer,double[]> readConstraintsFromFileIndex(String filename, InstanceList data) { HashMap<Integer,double[]> constraints = new HashMap<Integer,double[]>(); File file = new File(filename); try { BufferedReader reader = new BufferedReader(new FileReader(file)); String line = reader.readLine(); while (line != null) { String[] split = line.split("\\s+"); int featureIndex = Integer.parseInt(split[0]); assert(split.length - 1 == data.getTargetAlphabet().size()); double[] probs = new double[split.length - 1]; for (int index = 1; index < split.length; index++) { double prob = Double.parseDouble(split[index]); probs[index-1] = prob; } constraints.put(featureIndex, probs); line = reader.readLine(); } } catch (Exception e) { e.printStackTrace(); System.exit(1); } return constraints; } private static boolean testConstraintsFileIndexBased(String filename) { File file = new File(filename); String firstLine = ""; try { BufferedReader reader = new BufferedReader(new FileReader(file)); firstLine = reader.readLine(); } catch (Exception e) { e.printStackTrace(); System.exit(1); } return !firstLine.contains(":"); } /** * Select features with the highest information gain. * * @param list InstanceList for computing information gain. * @param numFeatures Number of features to select. * @return List of features with the highest information gains. */ public static ArrayList<Integer> selectFeaturesByInfoGain(InstanceList list, int numFeatures) { ArrayList<Integer> features = new ArrayList<Integer>(); InfoGain infogain = new InfoGain(list); for (int rank = 0; rank < numFeatures; rank++) { features.add(infogain.getIndexAtRank(rank)); } return features; } /** * Select top features in LDA topics. * * @param numSelFeatures Number of features to select. * @param ldaEst LDAEstimatePr which provides an interface to an LDA model. * @param seqAlphabet The alphabet for the sequence dataset, which may be different from the vector dataset alphabet. * @param alphabet The vector dataset alphabet. * @return ArrayList with the int indices of the selected features. */ public static ArrayList<Integer> selectTopLDAFeatures(int numSelFeatures, ParallelTopicModel lda, Alphabet alphabet) { ArrayList<Integer> features = new ArrayList<Integer>(); Alphabet seqAlphabet = lda.getAlphabet(); int numTopics = lda.getNumTopics(); Object[][] sorted = lda.getTopWords(seqAlphabet.size()); for (int pos = 0; pos < seqAlphabet.size(); pos++) { for (int ti = 0; ti < numTopics; ti++) { Object feat = sorted[ti][pos].toString(); int fi = alphabet.lookupIndex(feat,false); if ((fi >=0) && (!features.contains(fi))) { logger.info("Selected feature: " + feat); features.add(fi); if (features.size() == numSelFeatures) { return features; } } } } return features; } /** * Set target distributions using estimates from data. * * @param list InstanceList used to estimate target distributions. * @param features List of features for constraints. * @return Constraints (map of feature index to target distribution), with target * distributions set using estimates from supplied data. */ public static HashMap<Integer,double[]> setTargetsUsingData(InstanceList list, ArrayList<Integer> features) { HashMap<Integer,double[]> constraints = new HashMap<Integer,double[]>(); double[][] featureLabelCounts = getFeatureLabelCounts(list); for (int i = 0; i < features.size(); i++) { int fi = features.get(i); if (fi != list.getDataAlphabet().size()) { double[] prob = featureLabelCounts[fi]; // Smooth probability distributions by adding a (very) // small count. We just need to make sure they aren't // zero in which case the KL-divergence is infinite. MatrixOps.plusEquals(prob, 1e-8); MatrixOps.timesEquals(prob, 1./MatrixOps.sum(prob)); constraints.put(fi, prob); } } return constraints; } /** * Set target distributions using "Schapire" heuristic described in * "Learning from Labeled Features using Generalized Expectation Criteria" * Gregory Druck, Gideon Mann, Andrew McCallum. * * @param labeledFeatures HashMap of feature indices to lists of label indices for that feature. * @param numLabels Total number of labels. * @param majorityProb Probability mass divided among majority labels. * @return Constraints (map of feature index to target distribution), with target * distributions set using heuristic. */ public static HashMap<Integer,double[]> setTargetsUsingHeuristic(HashMap<Integer,ArrayList<Integer>> labeledFeatures, int numLabels, double majorityProb) { HashMap<Integer,double[]> constraints = new HashMap<Integer,double[]>(); Iterator<Integer> keyIter = labeledFeatures.keySet().iterator(); while (keyIter.hasNext()) { int fi = keyIter.next(); ArrayList<Integer> labels = labeledFeatures.get(fi); constraints.put(fi, getHeuristicPrior(labels,numLabels,majorityProb)); } return constraints; } /** * Set target distributions using feature voting heuristic described in * "Learning from Labeled Features using Generalized Expectation Criteria" * Gregory Druck, Gideon Mann, Andrew McCallum. * * @param labeledFeatures HashMap of feature indices to lists of label indices for that feature. * @param trainingData InstanceList to use for computing expectations with feature voting. * @return Constraints (map of feature index to target distribution), with target * distributions set using feature voting. */ public static HashMap<Integer, double[]> setTargetsUsingFeatureVoting(HashMap<Integer,ArrayList<Integer>> labeledFeatures, InstanceList trainingData) { HashMap<Integer,double[]> constraints = new HashMap<Integer,double[]>(); int numLabels = trainingData.getTargetAlphabet().size(); Iterator<Integer> keyIter = labeledFeatures.keySet().iterator(); double[][] featureCounts = new double[labeledFeatures.size()][numLabels]; for (int ii = 0; ii < trainingData.size(); ii++) { Instance instance = trainingData.get(ii); FeatureVector fv = (FeatureVector)instance.getData(); Labeling labeling = trainingData.get(ii).getLabeling(); double[] labelDist = new double[numLabels]; if (labeling == null) { labelByVoting(labeledFeatures,instance,labelDist); } else { int li = labeling.getBestIndex(); labelDist[li] = 1.; } keyIter = labeledFeatures.keySet().iterator(); int i = 0; while (keyIter.hasNext()) { int fi = keyIter.next(); if (fv.location(fi) >= 0) { for (int li = 0; li < numLabels; li++) { featureCounts[i][li] += labelDist[li] * fv.valueAtLocation(fv.location(fi)); } } i++; } } keyIter = labeledFeatures.keySet().iterator(); int i = 0; while (keyIter.hasNext()) { int fi = keyIter.next(); // smoothing counts MatrixOps.plusEquals(featureCounts[i], 1e-8); MatrixOps.timesEquals(featureCounts[i],1./MatrixOps.sum(featureCounts[i])); constraints.put(fi, featureCounts[i]); i++; } return constraints; } /** * Label features using heuristic described in * "Learning from Labeled Features using Generalized Expectation Criteria" * Gregory Druck, Gideon Mann, Andrew McCallum. * * @param list InstanceList used to compute statistics for labeling features. * @param features List of features to label. * @return Labeled features, HashMap mapping feature indices to list of labels. */ public static HashMap<Integer, ArrayList<Integer>> labelFeatures(InstanceList list, ArrayList<Integer> features) { HashMap<Integer,ArrayList<Integer>> labeledFeatures = new HashMap<Integer,ArrayList<Integer>>(); double[][] featureLabelCounts = getFeatureLabelCounts(list); int numLabels = list.getTargetAlphabet().size(); int minRank = 100 * numLabels; InfoGain infogain = new InfoGain(list); double sum = 0; for (int rank = 0; rank < minRank; rank++) { sum += infogain.getValueAtRank(rank); } double mean = sum / minRank; for (int i = 0; i < features.size(); i++) { int fi = features.get(i); // reject features with infogain // less than cutoff if (infogain.value(fi) < mean) { logger.info("Oracle labeler rejected labeling: " + list.getDataAlphabet().lookupObject(fi)); continue; } double[] prob = featureLabelCounts[fi]; MatrixOps.plusEquals(prob,1e-8); MatrixOps.timesEquals(prob, 1./MatrixOps.sum(prob)); int[] sortedIndices = getMaxIndices(prob); ArrayList<Integer> labels = new ArrayList<Integer>(); if (numLabels > 2) { // take anything within a factor of 2 of the best // but no more than numLabels/2 boolean discard = false; double threshold = prob[sortedIndices[0]] / 2; for (int li = 0; li < numLabels; li++) { if (prob[li] > threshold) { labels.add(li); } if (labels.size() > (numLabels / 2)) { discard = true; break; } } if (discard) { continue; } } else { labels.add(sortedIndices[0]); } labeledFeatures.put(fi, labels); } return labeledFeatures; } private static double[][] getFeatureLabelCounts(InstanceList list) { int numFeatures = list.getDataAlphabet().size(); int numLabels = list.getTargetAlphabet().size(); double[][] featureLabelCounts = new double[numFeatures][numLabels]; for (int ii = 0; ii < list.size(); ii++) { Instance instance = list.get(ii); FeatureVector featureVector = (FeatureVector)instance.getData(); // this handles distributions over labels for (int li = 0; li < numLabels; li++) { double py = instance.getLabeling().value(li); for (int loc = 0; loc < featureVector.numLocations(); loc++) { int fi = featureVector.indexAtLocation(loc); double val = featureVector.valueAtLocation(loc); featureLabelCounts[fi][li] += py * val; } } } return featureLabelCounts; } private static double[] getHeuristicPrior (ArrayList<Integer> labeledFeatures, int numLabels, double majorityProb) { int numIndices = labeledFeatures.size(); double[] dist = new double[numLabels]; if (numIndices == numLabels) { for (int i = 0; i < dist.length; i++) { dist[i] = 1./numLabels; } return dist; } double keywordProb = majorityProb / numIndices; double otherProb = (1 - majorityProb) / (numLabels - numIndices); for (int i = 0; i < labeledFeatures.size(); i++) { int li = labeledFeatures.get(i); dist[li] = keywordProb; } for (int li = 0; li < numLabels; li++) { if (dist[li] == 0) { dist[li] = otherProb; } } assert(Maths.almostEquals(MatrixOps.sum(dist),1)); return dist; } private static void labelByVoting(HashMap<Integer,ArrayList<Integer>> labeledFeatures, Instance instance, double[] scores) { FeatureVector fv = (FeatureVector)instance.getData(); int numFeatures = instance.getDataAlphabet().size() + 1; int[] numLabels = new int[instance.getTargetAlphabet().size()]; Iterator<Integer> keyIterator = labeledFeatures.keySet().iterator(); while (keyIterator.hasNext()) { ArrayList<Integer> majorityClassList = labeledFeatures.get(keyIterator.next()); for (int i = 0; i < majorityClassList.size(); i++) { int li = majorityClassList.get(i); numLabels[li]++; } } keyIterator = labeledFeatures.keySet().iterator(); while (keyIterator.hasNext()) { int next = keyIterator.next(); assert(next < numFeatures); int loc = fv.location(next); if (loc < 0) { continue; } ArrayList<Integer> majorityClassList = labeledFeatures.get(next); for (int i = 0; i < majorityClassList.size(); i++) { int li = majorityClassList.get(i); scores[li] += 1; } } double sum = MatrixOps.sum(scores); if (sum == 0) { MatrixOps.plusEquals(scores, 1.0); sum = MatrixOps.sum(scores); } for (int li = 0; li < scores.length; li++) { scores[li] /= sum; } } /* * These functions are no longer needed. * private static double[][] getPrWordTopic(LDAHyper lda){ int numTopics = lda.getNumTopics(); int numTypes = lda.getAlphabet().size(); double[][] prWordTopic = new double[numTopics][numTypes]; for (int ti = 0 ; ti < numTopics; ti++){ for (int wi = 0 ; wi < numTypes; wi++){ prWordTopic[ti][wi] = (double) lda.getCountFeatureTopic(wi, ti) / (double) lda.getCountTokensPerTopic(ti); } } return prWordTopic; } private static int[][] getSortedTopic(double[][] prTopicWord){ int numTopics = prTopicWord.length; int numTypes = prTopicWord[0].length; int[][] sortedTopicIdx = new int[numTopics][numTypes]; for (int ti = 0; ti < numTopics; ti++){ int[] topicIdx = getMaxIndices(prTopicWord[ti]); System.arraycopy(topicIdx, 0, sortedTopicIdx[ti], 0, topicIdx.length); } return sortedTopicIdx; } */ private static int[] getMaxIndices(double[] x) { ArrayList<Element> list = new ArrayList<Element>(); for (int i = 0; i < x.length; i++) { Element element = new Element(i,x[i]); list.add(element); } Collections.sort(list); Collections.reverse(list); int[] sortedIndices = new int[x.length]; for (int i = 0; i < x.length; i++) { sortedIndices[i] = list.get(i).index; } return sortedIndices; } private static class Element implements Comparable<Element> { private int index; private double value; public Element(int index, double value) { this.index = index; this.value = value; } public int compareTo(Element element) { return Double.compare(this.value, element.value); } } }
18,628
33.690875
157
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/AdaBoostTrainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.util.Random; import java.util.logging.*; import cc.mallet.types.*; import cc.mallet.util.MalletLogger; import cc.mallet.util.Maths; /** * This version of AdaBoost should be used only for binary classification. * Use AdaBoost.M2 for multi-class problems. * * <p>Robert E. Schapire. * "A decision-theoretic generalization of on-line learning and * an application to boosting" * In Journal of Computer and System Sciences * http://www.cs.princeton.edu/~schapire/uncompress-papers.cgi/FreundSc95.ps * * @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class AdaBoostTrainer extends ClassifierTrainer<AdaBoost> { private static Logger logger = MalletLogger.getLogger(AdaBoostTrainer.class.getName()); private static int MAX_NUM_RESAMPLING_ITERATIONS = 10; ClassifierTrainer weakLearner; int numRounds; AdaBoost classifier; public AdaBoost getClassifier () { return classifier; } public AdaBoostTrainer (ClassifierTrainer weakLearner, int numRounds) { if (! (weakLearner instanceof Boostable)) throw new IllegalArgumentException ("weak learner not boostable"); if (numRounds <= 0) throw new IllegalArgumentException ("number of rounds must be positive"); this.weakLearner = weakLearner; this.numRounds = numRounds; } public AdaBoostTrainer (ClassifierTrainer weakLearner) { this (weakLearner, 100); } /** * Boosting method that resamples instances using their weights */ public AdaBoost train (InstanceList trainingList) { FeatureSelection selectedFeatures = trainingList.getFeatureSelection(); if (selectedFeatures != null) throw new UnsupportedOperationException("FeatureSelection not yet implemented."); java.util.Random random = new java.util.Random(); // Set the initial weights to be uniform double w = 1.0 / trainingList.size(); InstanceList trainingInsts = new InstanceList(trainingList.getPipe(), trainingList.size()); for (int i = 0; i < trainingList.size(); i++) trainingInsts.add(trainingList.get(i), w); boolean[] correct = new boolean[trainingInsts.size()]; int numClasses = trainingInsts.getTargetAlphabet().size(); if (numClasses != 2) logger.info("AdaBoostTrainer.train: WARNING: more than two classes"); Classifier[] weakLearners = new Classifier[numRounds]; double[] alphas = new double[numRounds]; InstanceList roundTrainingInsts = new InstanceList(trainingInsts.getPipe()); // Boosting iterations for (int round = 0; round < numRounds; round++) { logger.info("=========== AdaBoostTrainer round " + (round+1) + " begin"); // Keep resampling the training instances (using the distribution // of instance weights) on which to train the weak learner until // either we exceed the preset number of maximum iterations, or // the weak learner makes a non-zero error on trainingInsts // (this makes sure we sample at least some 'hard' instances). int resamplingIterations = 0; double err; do { err = 0; roundTrainingInsts = trainingInsts.sampleWithInstanceWeights(random); weakLearners[round] = weakLearner.train (roundTrainingInsts); // Calculate error for (int i = 0; i < trainingInsts.size(); i++) { Instance inst = trainingInsts.get(i); if (weakLearners[round].classify(inst).bestLabelIsCorrect()) correct[i] = true; else { correct[i] = false; err += trainingInsts.getInstanceWeight(i); } } resamplingIterations++; } while (Maths.almostEquals(err, 0) && resamplingIterations < MAX_NUM_RESAMPLING_ITERATIONS); // Stop boosting when error is too big or 0, // ignoring weak classifier trained this round if (Maths.almostEquals(err, 0) || err > 0.5) { logger.info("AdaBoostTrainer stopped at " + (round+1) + " / " + numRounds + " rounds: numClasses=" + numClasses + " error=" + err); // If we are in the first round, have to use the weak classifier in any case int numClassifiersToUse = (round == 0) ? 1 : round; if (round == 0) alphas[0] = 1; double[] betas = new double[numClassifiersToUse]; Classifier[] weakClassifiers = new Classifier[numClassifiersToUse]; System.arraycopy(alphas, 0, betas, 0, numClassifiersToUse); System.arraycopy(weakLearners, 0, weakClassifiers, 0, numClassifiersToUse); for (int i = 0; i < betas.length; i++) logger.info("AdaBoostTrainer weight[weakLearner[" + i + "]]=" + betas[i]); return new AdaBoost (roundTrainingInsts.getPipe(), weakClassifiers, betas); } // Calculate the weight to assign to this weak classifier // This formula is really designed for binary classifiers that don't // give a confidence score. Use AdaBoostMH for multi-class or // multi-labeled data. alphas[round] = Math.log((1 - err) / err); double reweightFactor = err / (1 - err); double sum = 0; // Decrease weights of correctly classified instances for (int i = 0; i < trainingInsts.size(); i++) { w = trainingInsts.getInstanceWeight(i); if (correct[i]) w *= reweightFactor; trainingInsts.setInstanceWeight (i, w); sum += w; } // Normalize the instance weights for (int i = 0; i < trainingInsts.size(); i++) { trainingInsts.setInstanceWeight (i, trainingInsts.getInstanceWeight(i) / sum); } logger.info("=========== AdaBoostTrainer round " + (round+1) + " finished, weak classifier training error = " + err); } for (int i = 0; i < alphas.length; i++) logger.info("AdaBoostTrainer weight[weakLearner[" + i + "]]=" + alphas[i]); this.classifier = new AdaBoost (roundTrainingInsts.getPipe(), weakLearners, alphas); return classifier; } }
6,128
38.038217
94
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/ConfidencePredictingClassifierTrainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify; import java.util.ArrayList; import java.util.logging.*; import cc.mallet.classify.evaluate.*; import cc.mallet.pipe.Classification2ConfidencePredictingFeatureVector; import cc.mallet.pipe.Pipe; import cc.mallet.types.*; import cc.mallet.util.MalletLogger; import cc.mallet.util.PropertyList; public class ConfidencePredictingClassifierTrainer extends ClassifierTrainer<ConfidencePredictingClassifier> implements Boostable { private static Logger logger = MalletLogger.getLogger(ConfidencePredictingClassifierTrainer.class.getName()); ClassifierTrainer underlyingClassifierTrainer; MaxEntTrainer confidencePredictingClassifierTrainer; //DecisionTreeTrainer confidencePredictingClassifierTrainer; //NaiveBayesTrainer confidencePredictingClassifierTrainer; Pipe confidencePredictingPipe; static ConfusionMatrix confusionMatrix = null; ConfidencePredictingClassifier classifier; public ConfidencePredictingClassifier getClassifier () { return classifier; } public ConfidencePredictingClassifierTrainer (ClassifierTrainer underlyingClassifierTrainer, InstanceList validationSet, Pipe confidencePredictingPipe) { this.confidencePredictingPipe = confidencePredictingPipe; this.confidencePredictingClassifierTrainer = new MaxEntTrainer(); this.validationSet = validationSet; //this.confidencePredictingClassifierTrainer = new DecisionTreeTrainer(); //this.confidencePredictingClassifierTrainer = new NaiveBayesTrainer(); this.underlyingClassifierTrainer = underlyingClassifierTrainer; } public ConfidencePredictingClassifierTrainer (ClassifierTrainer underlyingClassifierTrainer, InstanceList validationSet) { this (underlyingClassifierTrainer, validationSet, new Classification2ConfidencePredictingFeatureVector()); } public ConfidencePredictingClassifier train (InstanceList trainList) { FeatureSelection selectedFeatures = trainList.getFeatureSelection(); logger.fine ("Training underlying classifier"); Classifier c = underlyingClassifierTrainer.train (trainList); confusionMatrix = new ConfusionMatrix(new Trial(c, trainList)); assert (validationSet != null) : "This ClassifierTrainer requires a validation set."; Trial t = new Trial (c, validationSet); double accuracy = t.getAccuracy(); InstanceList confidencePredictionTraining = new InstanceList (confidencePredictingPipe); logger.fine ("Creating confidence prediction instance list"); double weight; for (int i = 0; i < t.size(); i++) { Classification classification = t.get(i); confidencePredictionTraining.add (classification, null, classification.getInstance().getName(), classification.getInstance().getSource()); } logger.info("Begin training ConfidencePredictingClassifier . . . "); Classifier cpc = confidencePredictingClassifierTrainer.train (confidencePredictionTraining); logger.info("Accuracy at predicting correct/incorrect in training = " + cpc.getAccuracy(confidencePredictionTraining)); // get most informative features per class, then combine to make // new feature conjunctions PerLabelInfoGain perLabelInfoGain = new PerLabelInfoGain (trainList); /* AdaBoostTrainer adaTrainer = new AdaBoostTrainer (confidencePredictingClassifierTrainer, 10); Classifier ada = adaTrainer.train (confidencePredictionTraining); System.out.println ("Accuracy at predicting correct/incorrect in BOOSTING training = " + ada.getAccuracy(confidencePredictionTraining)); */ // print out most informative features /* InfoGain ig = new InfoGain (confidencePredictionTraining); for (int i = 0; i < ig.numLocations(); i++) logger.info ("InfoGain["+ig.getObjectAtRank(i)+"]="+ig.getValueAtRank(i)); */ this.classifier = new ConfidencePredictingClassifier (c, cpc); return classifier; // return new ConfidencePredictingClassifier (c, ada); } }
4,367
40.207547
144
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/WinnowTrainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Aron Culotta <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify; import cc.mallet.classify.Classifier; import cc.mallet.classify.Winnow; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.FeatureSelection; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.LabelVector; import cc.mallet.types.Labeling; /** * An implementation of the training methods of a * Winnow2 on-line classifier. Given an instance xi, * the algorithm computes Sum(xi*wi), where wi is * the weight for that feature in the given class. * If the Sum is greater than some threshold * {@link #theta theta}, then the classifier guess * true for that class. * Only when the classifier makes a mistake are the * weights updated in one of two steps: * Promote: guessed 0 and answer was 1. Multiply * all weights of present features by {@link #alpha alpha}. * Demote: guessed 1 and answer was 0. Divide * all weights of present features by {@link #beta beta}. * * Limitations: Winnow2 only considers binary feature * vectors (i.e. whether or not the feature is present, * not its value). */ public class WinnowTrainer extends ClassifierTrainer<Winnow> { static final double DEFAULT_ALPHA = 2.0; static final double DEFAULT_BETA = 2.0; static final double DEFAULT_NFACTOR = .5; /** *constant to multiply to "correct" weights in promotion step */ double alpha; /** *constant to divide "incorrect" weights by in demotion step */ double beta; /** *threshold for sum of wi*xi in formulating guess */ double theta; /** *factor of n to set theta to. e.g. if n=1/2, theta = n/2. */ double nfactor; /** *array of weights, one for each feature, initialized to 1 */ double [][] weights; Winnow classifier; /** * Default constructor. Sets all features to defaults. */ public WinnowTrainer(){ this(DEFAULT_ALPHA, DEFAULT_BETA, DEFAULT_NFACTOR); } /** * Sets alpha and beta and default value for theta * @param a alpha value * @param b beta value */ public WinnowTrainer(double a, double b){ this(a, b, DEFAULT_NFACTOR); } /** * Sets alpha, beta, and nfactor * @param a alpha value * @param b beta value * @param nfact nfactor value */ public WinnowTrainer(double a, double b, double nfact){ this.alpha = a; this.beta = b; this.nfactor = nfact; } public Winnow getClassifier () { return classifier; } /** * Trains winnow on the instance list, updating * {@link #weights weights} according to errors * @param ilist Instance list to be trained on * @return Classifier object containing learned weights */ public Winnow train (InstanceList trainingList) { FeatureSelection selectedFeatures = trainingList.getFeatureSelection(); if (selectedFeatures != null) // xxx Attend to FeatureSelection!!! throw new UnsupportedOperationException ("FeatureSelection not yet implemented."); // if "train" is run more than once, // we will be reinitializing the weights // TODO: provide method to save weights trainingList.getDataAlphabet().stopGrowth(); trainingList.getTargetAlphabet().stopGrowth(); Pipe dataPipe = trainingList.getPipe (); Alphabet dict = (Alphabet) trainingList.getDataAlphabet (); int numLabels = trainingList.getTargetAlphabet().size(); int numFeats = dict.size(); this.theta = numFeats * this.nfactor; this.weights = new double [numLabels][numFeats]; // init weights to 1 for(int i=0; i<numLabels; i++) for(int j=0; j<numFeats; j++) this.weights[i][j] = 1.0; //System.out.println("Init weights to 1. Theta= "+theta); // loop through all instances for (int ii = 0; ii < trainingList.size(); ii++){ Instance inst = (Instance) trainingList.get(ii); Labeling labeling = inst.getLabeling (); FeatureVector fv = (FeatureVector) inst.getData (); double[] results = new double [numLabels]; int fvisize = fv.numLocations(); int correctIndex = labeling.getBestIndex(); for(int rpos=0; rpos < numLabels; rpos++) results[rpos]=0; // sum up xi*wi for each class for(int fvi=0; fvi < fvisize; fvi++){ int fi = fv.indexAtLocation(fvi); //System.out.println("feature index "+fi); for(int lpos=0; lpos < numLabels; lpos++) results[lpos] += this.weights[lpos][fi]; } //System.out.println("In instance " + ii); // make guess for each label using threshold // update weights according to alpha and beta // upon incorrect guess for(int ri=0; ri < numLabels; ri++){ if(results[ri] > this.theta){ // guess 1 if(correctIndex != ri) // correct is 0 demote(ri, fv); } else{ // guess 0 if(correctIndex == ri) // correct is 1 promote(ri, fv); } } // System.out.println("Results guessed:") // for(int x=0; x<numLabels; x++) // System.out.println(results[x]); // System.out.println("Correct label: "+correctIndex ); // System.out.println("Weights are"); // for(int h=0; h<numLabels; h++){ // for(int g=0; g<numFeats; g++) // System.out.println(weights[h][g]); // System.out.println(""); // } } classifier = new Winnow (dataPipe, weights, theta, numLabels, numFeats); return classifier; } /** * Promotes (by {@link #alpha alpha}) the weights * responsible for the incorrect guess * @param lpos index of incorrectly guessed label * @param fv feature vector */ private void promote(int lpos, FeatureVector fv){ int fvisize = fv.numLocations(); // learner predicted 0, correct is 1 -> promotion for(int fvi=0; fvi < fvisize; fvi++){ int fi = fv.indexAtLocation(fvi); this.weights[lpos][fi] *= this.alpha; } } /** *Demotes (by {@link #beta beta) the weights * responsible for the incorrect guess * @param lpos index of incorrectly guessed label * @param fv feature vector */ private void demote(int lpos, FeatureVector fv){ int fvisize = fv.numLocations(); // learner predicted 1, correct is 0 -> demotion for(int fvi=0; fvi < fvisize; fvi++){ int fi = fv.indexAtLocation(fvi); this.weights[lpos][fi] /= this.beta; } } }
6,657
30.704762
86
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/ClassifierEvaluator.java
package cc.mallet.classify; import java.util.logging.Logger; import cc.mallet.fst.TransducerEvaluator; import cc.mallet.fst.TransducerTrainer; import cc.mallet.optimize.Optimizable; import cc.mallet.types.InstanceList; import cc.mallet.util.MalletLogger; public abstract class ClassifierEvaluator { private static Logger logger = MalletLogger.getLogger(ClassifierEvaluator.class.getName()); InstanceList[] instanceLists; String[] instanceListDescriptions; public ClassifierEvaluator (InstanceList[] instanceLists, String[] instanceListDescriptions) { this.instanceLists = instanceLists; this.instanceListDescriptions = instanceListDescriptions; } public ClassifierEvaluator (InstanceList instanceList1, String instanceListDescription1) { this(new InstanceList[] {instanceList1}, new String[] {instanceListDescription1}); } public ClassifierEvaluator (InstanceList instanceList1, String instanceListDescription1, InstanceList instanceList2, String instanceListDescription2) { this(new InstanceList[] {instanceList1, instanceList2}, new String[] {instanceListDescription1, instanceListDescription2}); } public ClassifierEvaluator (InstanceList instanceList1, String instanceListDescription1, InstanceList instanceList2, String instanceListDescription2, InstanceList instanceList3, String instanceListDescription3) { this(new InstanceList[] {instanceList1, instanceList2, instanceList3}, new String[] {instanceListDescription1, instanceListDescription2, instanceListDescription3}); } /** * Evaluates a ClassifierTrainer and its Classifier on the instance lists specified in the constructor. . * <P> * The default implementation calls the evaluator's <TT>evaluateInstanceList</TT> on each instance list. * * @param ct The TransducerTrainer to evaluate. */ public void evaluate (ClassifierTrainer ct) { this.preamble(ct); for (int k = 0; k < instanceLists.length; k++) if (instanceLists[k] != null) evaluateInstanceList (ct, instanceLists[k], instanceListDescriptions[k]); } protected void preamble (ClassifierTrainer ct) { if (ct instanceof ClassifierTrainer.ByOptimization) { Optimizable opt; int iteration = ((ClassifierTrainer.ByOptimization)ct).getIteration(); if ((opt = ((ClassifierTrainer.ByOptimization)ct).getOptimizer().getOptimizable()) instanceof Optimizable.ByValue) logger.info ("Evaluator iteration="+iteration+" cost="+((Optimizable.ByValue)opt).getValue()); else logger.info ("Evaluator iteration="+iteration+" cost=NA (not Optimizable.ByValue)"); } } public abstract void evaluateInstanceList (ClassifierTrainer trainer, InstanceList instances, String description); }
2,713
38.333333
125
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/MCMaxEnt.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.DenseVector; import cc.mallet.types.FeatureSelection; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.LabelAlphabet; import cc.mallet.types.LabelVector; import cc.mallet.types.MatrixOps; /** * Maximum Entropy classifier. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class MCMaxEnt extends Classifier implements Serializable { double [] parameters; // indexed by <labelIndex,featureIndex> int defaultFeatureIndex; FeatureSelection featureSelection; FeatureSelection[] perClassFeatureSelection; // The default feature is always the feature with highest index public MCMaxEnt (Pipe dataPipe, double[] parameters, FeatureSelection featureSelection, FeatureSelection[] perClassFeatureSelection) { super (dataPipe); assert (featureSelection == null || perClassFeatureSelection == null); this.parameters = parameters; this.featureSelection = featureSelection; this.perClassFeatureSelection = perClassFeatureSelection; this.defaultFeatureIndex = dataPipe.getDataAlphabet().size(); // assert (parameters.getNumCols() == defaultFeatureIndex+1); } public MCMaxEnt (Pipe dataPipe, double[] parameters, FeatureSelection featureSelection) { this (dataPipe, parameters, featureSelection, null); } public MCMaxEnt (Pipe dataPipe, double[] parameters, FeatureSelection[] perClassFeatureSelection) { this (dataPipe, parameters, null, perClassFeatureSelection); } public MCMaxEnt (Pipe dataPipe, double[] parameters) { this (dataPipe, parameters, null, null); } public double[] getParameters () { return parameters; } public void setParameter (int classIndex, int featureIndex, double value) { parameters[classIndex*(getAlphabet().size()+1) + featureIndex] = value; } public void getUnnormalizedClassificationScores (Instance instance, double[] scores) { // arrayOutOfBounds if pipe has grown since training // int numFeatures = getAlphabet().size() + 1; int numFeatures = this.defaultFeatureIndex + 1; int numLabels = getLabelAlphabet().size(); assert (scores.length == numLabels); FeatureVector fv = (FeatureVector) instance.getData (); // Make sure the feature vector's feature dictionary matches // what we are expecting from our data pipe (and thus our notion // of feature probabilities. assert (fv.getAlphabet () == this.instancePipe.getDataAlphabet ()); // Include the feature weights according to each label for (int li = 0; li < numLabels; li++) { scores[li] = parameters[li*numFeatures + defaultFeatureIndex] + MatrixOps.rowDotProduct (parameters, numFeatures, li, fv, defaultFeatureIndex, (perClassFeatureSelection == null ? featureSelection : perClassFeatureSelection[li])); } } public void getClassificationScores (Instance instance, double[] scores) { int numLabels = getLabelAlphabet().size(); assert (scores.length == numLabels); FeatureVector fv = (FeatureVector) instance.getData (); // Make sure the feature vector's feature dictionary matches // what we are expecting from our data pipe (and thus our notion // of feature probabilities. assert (instancePipe == null || fv.getAlphabet () == this.instancePipe.getDataAlphabet ()); // arrayOutOfBounds if pipe has grown since training // int numFeatures = getAlphabet().size() + 1; int numFeatures = this.defaultFeatureIndex + 1; // Include the feature weights according to each label for (int li = 0; li < numLabels; li++) { scores[li] = parameters[li*numFeatures + defaultFeatureIndex] + MatrixOps.rowDotProduct (parameters, numFeatures, li, fv, defaultFeatureIndex, (perClassFeatureSelection == null ? featureSelection : perClassFeatureSelection[li])); // xxxNaN assert (!Double.isNaN(scores[li])) : "li="+li; } // Move scores to a range where exp() is accurate, and normalize double max = MatrixOps.max (scores); double sum = 0; for (int li = 0; li < numLabels; li++) sum += (scores[li] = Math.exp (scores[li] - max)); for (int li = 0; li < numLabels; li++) { scores[li] /= sum; // xxxNaN assert (!Double.isNaN(scores[li])); } } public Classification classify (Instance instance) { int numClasses = getLabelAlphabet().size(); double[] scores = new double[numClasses]; getClassificationScores (instance, scores); // Create and return a Classification object return new Classification (instance, this, new LabelVector (getLabelAlphabet(), scores)); } public void print () { final Alphabet dict = getAlphabet(); final LabelAlphabet labelDict = getLabelAlphabet(); int numFeatures = dict.size() + 1; int numLabels = labelDict.size(); // Include the feature weights according to each label for (int li = 0; li < numLabels; li++) { System.out.println ("FEATURES FOR CLASS "+labelDict.lookupObject (li)); System.out.println (" <default> "+parameters [li*numFeatures + defaultFeatureIndex]); for (int i = 0; i < defaultFeatureIndex; i++) { Object name = dict.lookupObject (i); double weight = parameters [li*numFeatures + i]; System.out.println (" "+name+" "+weight); } } } private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; static final int NULL_INTEGER = -1; private void writeObject(ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); out.writeObject(getInstancePipe()); int np = parameters.length; out.writeInt(np); for (int p = 0; p < np; p++) out.writeDouble(parameters[p]); out.writeInt(defaultFeatureIndex); if (featureSelection == null) out.writeInt(NULL_INTEGER); else { out.writeInt(1); out.writeObject(featureSelection); } if (perClassFeatureSelection == null) out.writeInt(NULL_INTEGER); else { out.writeInt(perClassFeatureSelection.length); for (int i = 0; i < perClassFeatureSelection.length; i++) if (perClassFeatureSelection[i] == null) out.writeInt(NULL_INTEGER); else { out.writeInt(1); out.writeObject(perClassFeatureSelection[i]); } } } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt(); if (version != CURRENT_SERIAL_VERSION) throw new ClassNotFoundException("Mismatched MCMaxEnt versions: wanted " + CURRENT_SERIAL_VERSION + ", got " + version); instancePipe = (Pipe) in.readObject(); int np = in.readInt(); parameters = new double[np]; for (int p = 0; p < np; p++) parameters[p] = in.readDouble(); defaultFeatureIndex = in.readInt(); int opt = in.readInt(); if (opt == 1) featureSelection = (FeatureSelection)in.readObject(); int nfs = in.readInt(); if (nfs >= 0) { perClassFeatureSelection = new FeatureSelection[nfs]; for (int i = 0; i < nfs; i++) { opt = in.readInt(); if (opt == 1) perClassFeatureSelection[i] = (FeatureSelection)in.readObject(); } } } }
9,046
35.926531
99
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/NaiveBayesEMTrainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.util.logging.Logger; import cc.mallet.pipe.Pipe; import cc.mallet.types.Alphabet; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.Multinomial; import cc.mallet.util.MalletLogger; /** * @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class NaiveBayesEMTrainer extends ClassifierTrainer<NaiveBayes> { private static Logger logger = MalletLogger.getLogger(MCMaxEntTrainer.class.getName()); Multinomial.Estimator featureEstimator = new Multinomial.LaplaceEstimator(); Multinomial.Estimator priorEstimator = new Multinomial.LaplaceEstimator(); double docLengthNormalization = -1; double unlabeledDataWeight = 1.0; int iteration = 0; NaiveBayesTrainer.Factory nbTrainer; NaiveBayes classifier; public NaiveBayesEMTrainer () { nbTrainer = new NaiveBayesTrainer.Factory (); nbTrainer.setDocLengthNormalization(docLengthNormalization); nbTrainer.setFeatureMultinomialEstimator(featureEstimator); nbTrainer.setPriorMultinomialEstimator (priorEstimator); } public Multinomial.Estimator getFeatureMultinomialEstimator () { return featureEstimator; } public void setFeatureMultinomialEstimator (Multinomial.Estimator me) { featureEstimator = me; nbTrainer.setFeatureMultinomialEstimator(featureEstimator); } public Multinomial.Estimator getPriorMultinomialEstimator () { return priorEstimator; } public void setPriorMultinomialEstimator (Multinomial.Estimator me) { priorEstimator = me; nbTrainer.setPriorMultinomialEstimator(priorEstimator); } public void setDocLengthNormalization (double d) { docLengthNormalization = d; nbTrainer.setDocLengthNormalization(docLengthNormalization); } public double getDocLengthNormalization () { return docLengthNormalization; } public double getUnlabeledDataWeight () { return unlabeledDataWeight; } public void setUnlabeledDataWeight (double unlabeledDataWeight) { this.unlabeledDataWeight = unlabeledDataWeight; } public int getIteration() { return iteration; } public boolean isFinishedTraining() { return false; } public NaiveBayes getClassifier() { return classifier; } public NaiveBayes train (InstanceList trainingSet) { // Get a classifier trained on the labeled examples only NaiveBayes c = (NaiveBayes) nbTrainer.newClassifierTrainer().train (trainingSet); double prevLogLikelihood = 0, logLikelihood = 0; boolean converged = false; int iteration = 0; while (!converged) { // Make a new trainingSet that has some labels set InstanceList trainingSet2 = new InstanceList (trainingSet.getPipe()); for (int ii = 0; ii < trainingSet.size(); ii++) { Instance inst = trainingSet.get(ii); if (inst.getLabeling() != null) trainingSet2.add(inst, 1.0); else { Instance inst2 = inst.shallowCopy(); inst2.unLock(); inst2.setLabeling(c.classify(inst).getLabeling()); inst2.lock(); trainingSet2.add(inst2, unlabeledDataWeight); } } c = (NaiveBayes) nbTrainer.newClassifierTrainer().train (trainingSet2); logLikelihood = c.dataLogLikelihood (trainingSet2); System.err.println ("Loglikelihood = "+logLikelihood); // Wait for a change in log-likelihood of less than 0.01% and at least 10 iterations if (Math.abs((logLikelihood - prevLogLikelihood)/logLikelihood) < 0.0001) converged = true; prevLogLikelihood = logLikelihood; iteration++; } return c; } public String toString() { String ret = "NaiveBayesEMTrainer"; if (docLengthNormalization != 1.0) ret += ",docLengthNormalization="+docLengthNormalization; if (unlabeledDataWeight != 1.0) ret += ",unlabeledDataWeight="+unlabeledDataWeight; return ret; } // Serialization // serialVersionUID is overriden to prevent innocuous changes in this // class from making the serialization mechanism think the external // format has changed. private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject(ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); //default selections for the kind of Estimator used out.writeObject(featureEstimator); out.writeObject(priorEstimator); } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt(); if (version != CURRENT_SERIAL_VERSION) throw new ClassNotFoundException("Mismatched NaiveBayesTrainer versions: wanted " + CURRENT_SERIAL_VERSION + ", got " + version); //default selections for the kind of Estimator used featureEstimator = (Multinomial.Estimator) in.readObject(); priorEstimator = (Multinomial.Estimator) in.readObject(); } }
5,559
33.320988
95
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/ClassifierEnsembleTrainer.java
package cc.mallet.classify; import cc.mallet.types.InstanceList; /* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** * @author <a href="mailto:[email protected]">Andrew McCallum</a> */ public class ClassifierEnsembleTrainer extends ClassifierTrainer<ClassifierEnsemble> { Classifier[] classifiers; ClassifierEnsemble classifier; public ClassifierEnsemble getClassifier () { return classifier; } public ClassifierEnsembleTrainer (Classifier[] classifiers) { this.classifiers = (Classifier[]) classifiers.clone(); } public ClassifierEnsemble train (InstanceList trainingSet) { //if (initialClassifier != null) throw new IllegalArgumentException("initialClassifier not yet supported"); // Make an instance list, with features being the outputs of the ensemble classifiers //return null; throw new IllegalStateException ("Not yet implemented."); } }
1,261
36.117647
111
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/MaxEntOptimizableByGE.java
package cc.mallet.classify; import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.logging.Logger; import cc.mallet.optimize.Optimizable; import cc.mallet.types.FeatureVector; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.MatrixOps; import cc.mallet.util.MalletProgressMessageLogger; import cc.mallet.util.Maths; /** * Training of MaxEnt models with labeled features using * Generalized Expectation Criteria. * * Based on: * "Learning from Labeled Features using Generalized Expectation Criteria" * Gregory Druck, Gideon Mann, Andrew McCallum * SIGIR 2008 * * @author Gregory Druck <a href="mailto:[email protected]">[email protected]</a> */ /** * @author gdruck * */ public class MaxEntOptimizableByGE implements Optimizable.ByGradientValue { private static Logger progressLogger = MalletProgressMessageLogger.getLogger(MaxEntOptimizableByLabelLikelihood.class.getName()+"-pl"); private boolean cacheStale = true; private boolean useValues; private int defaultFeatureIndex; private double temperature; private double objWeight; private double cachedValue; private double gaussianPriorVariance; private double[] cachedGradient; private double[] parameters; private InstanceList trainingList; private MaxEnt classifier; private HashMap<Integer,double[]> constraints; private HashMap<Integer,Integer> mapping; /** * @param trainingList List with unlabeled training instances. * @param constraints Feature expectation constraints. * @param initClassifier Initial classifier. */ public MaxEntOptimizableByGE(InstanceList trainingList, HashMap<Integer,double[]> constraints, MaxEnt initClassifier) { useValues = false; temperature = 1.0; objWeight = 1.0; this.trainingList = trainingList; int numFeatures = trainingList.getDataAlphabet().size(); defaultFeatureIndex = numFeatures; int numLabels = trainingList.getTargetAlphabet().size(); parameters = new double[(numFeatures + 1) * numLabels]; cachedGradient = new double[(numFeatures + 1) * numLabels]; cachedValue = 0; if (classifier != null) { this.classifier = initClassifier; } else { this.classifier = new MaxEnt(trainingList.getPipe(),parameters); } this.constraints = constraints; } /** * Sets the variance for Gaussian prior or * equivalently the inverse of the weight * of the L2 regularization term. * * @param variance Gaussian prior variance. */ public void setGaussianPriorVariance(double variance) { this.gaussianPriorVariance = variance; } /** * Set the temperature, 1 / the exponent model predicted probabilities * are raised to when computing model expectations. As the temperature * increases, model probabilities approach 1 for the maximum probability * class, and 0 for other classes. DEFAULT: 1 * * @param temp Temperature. */ public void setTemperature(double temp) { this.temperature = temp; } /** * The weight of GE term in the objective function. * * @param weight GE term weight. */ public void setWeight(double weight) { this.objWeight = weight; } public MaxEnt getClassifier() { return classifier; } public double getValue() { if (!cacheStale) { return cachedValue; } if (objWeight == 0) { return 0.0; } Arrays.fill(cachedGradient,0); int numRefDist = constraints.size(); int numFeatures = trainingList.getDataAlphabet().size() + 1; int numLabels = trainingList.getTargetAlphabet().size(); double scalingFactor = objWeight; if (mapping == null) { // mapping maps between feature indices to // constraint indices setMapping(); } double[][] modelExpectations = new double[numRefDist][numLabels]; double[][] ratio = new double[numRefDist][numLabels]; double[] featureCounts = new double[numRefDist]; double[][] scores = new double[trainingList.size()][numLabels]; // pass 1: calculate model distribution for (int ii = 0; ii < trainingList.size(); ii++) { Instance instance = trainingList.get(ii); double instanceWeight = trainingList.getInstanceWeight(instance); // skip if labeled if (instance.getTarget() != null) { continue; } FeatureVector fv = (FeatureVector) instance.getData(); classifier.getClassificationScoresWithTemperature(instance, temperature, scores[ii]); for (int loc = 0; loc < fv.numLocations(); loc++) { int featureIndex = fv.indexAtLocation(loc); if (constraints.containsKey(featureIndex)) { int cIndex = mapping.get(featureIndex); double val; if (!useValues) { val = 1.; } else { val = fv.valueAtLocation(loc); } featureCounts[cIndex] += val; for (int l = 0; l < numLabels; l++) { modelExpectations[cIndex][l] += scores[ii][l] * val * instanceWeight; } } } // special case of label regularization if (constraints.containsKey(defaultFeatureIndex)) { int cIndex = mapping.get(defaultFeatureIndex); featureCounts[cIndex] += 1; for (int l = 0; l < numLabels; l++) { modelExpectations[cIndex][l] += scores[ii][l] * instanceWeight; } } } double value = 0; for (int featureIndex : constraints.keySet()) { int cIndex = mapping.get(featureIndex); if (featureCounts[cIndex] > 0) { for (int label = 0; label < numLabels; label++) { double cProb = constraints.get(featureIndex)[label]; // normalize by count modelExpectations[cIndex][label] /= featureCounts[cIndex]; ratio[cIndex][label] = cProb / modelExpectations[cIndex][label]; // add to the cross entropy term value += scalingFactor * cProb * Math.log(modelExpectations[cIndex][label]); // add to the entropy term if (cProb > 0) { value -= scalingFactor * cProb * Math.log(cProb); } } assert(Maths.almostEquals(MatrixOps.sum(modelExpectations[cIndex]),1)); } } // pass 2: determine per example gradient for (int ii = 0; ii < trainingList.size(); ii++) { Instance instance = trainingList.get(ii); // skip if labeled if (instance.getTarget() != null) { continue; } double instanceWeight = trainingList.getInstanceWeight(instance); FeatureVector fv = (FeatureVector) instance.getData(); for (int loc = 0; loc < fv.numLocations() + 1; loc++) { int featureIndex; if (loc == fv.numLocations()) { featureIndex = defaultFeatureIndex; } else { featureIndex = fv.indexAtLocation(loc); } if (constraints.containsKey(featureIndex)) { int cIndex = mapping.get(featureIndex); // skip if this feature never occurred if (featureCounts[cIndex] == 0) { continue; } double val; if ((featureIndex == defaultFeatureIndex)||(!useValues)) { val = 1; } else { val = fv.valueAtLocation(loc); } // compute \sum_y p(y|x) \hat{g}_y / \bar{g}_y double instanceExpectation = 0; for (int label = 0; label < numLabels; label++) { instanceExpectation += ratio[cIndex][label] * scores[ii][label]; } // define C = \sum_y p(y|x) g_y(y,x) \hat{g}_y / \bar{g}_y // compute \sum_y p(y|x) g_y(x,y) f(x,y) * (\hat{g}_y / \bar{g}_y - C) for (int label = 0; label < numLabels; label++) { if (scores[ii][label] == 0) continue; assert (!Double.isInfinite(scores[ii][label])); double weight = scalingFactor * instanceWeight * temperature * (val / featureCounts[cIndex]) * scores[ii][label] * (ratio[cIndex][label] - instanceExpectation); MatrixOps.rowPlusEquals(cachedGradient, numFeatures, label, fv, weight); cachedGradient[numFeatures * label + defaultFeatureIndex] += weight; } } } } cachedValue = value; cacheStale = false; double reg = getRegularization(); progressLogger.info ("Value (GE=" + value + " Gaussian prior= " + reg + ") = " + cachedValue); return value; } private double getRegularization() { double regularization; if (!Double.isInfinite(gaussianPriorVariance)) { regularization = Math.log(gaussianPriorVariance * Math.sqrt(2 * Math.PI)); } else { regularization = 0; } for (int pi = 0; pi < parameters.length; pi++) { double p = parameters[pi]; regularization -= p * p / (2 * gaussianPriorVariance); cachedGradient[pi] -= p / gaussianPriorVariance; } cachedValue += regularization; return regularization; } public void getValueGradient(double[] buffer) { if (cacheStale) { getValue(); } assert(buffer.length == cachedGradient.length); for (int i = 0; i < buffer.length; i++) { buffer[i] = cachedGradient[i]; } } public int getNumParameters() { return parameters.length; } public double getParameter(int index) { return parameters[index]; } public void getParameters(double[] buffer) { assert(buffer.length == parameters.length); System.arraycopy (parameters, 0, buffer, 0, buffer.length); } public void setParameter(int index, double value) { cacheStale = true; parameters[index] = value; } public void setParameters(double[] params) { assert(params.length == parameters.length); cacheStale = true; System.arraycopy (params, 0, parameters, 0, parameters.length); } private void setMapping() { int cCounter = 0; mapping = new HashMap<Integer,Integer>(); Iterator<Integer> keys = constraints.keySet().iterator(); while (keys.hasNext()) { int featureIndex = keys.next(); mapping.put(featureIndex, cCounter); cCounter++; } } }
10,367
30.228916
172
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/examples/DocumentClassifier.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Takes a list of directory names as arguments, (each directory should contain all the text files for each class), performs a random train/test split, trains a classifier, and outputs accuracy on the testing and training sets. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify.examples; import java.io.*; import cc.mallet.classify.*; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.*; import cc.mallet.types.*; public class DocumentClassifier { static public void main (String[] args) { // Create Java File objects for each of the arguments File[] directories = new File[args.length]; for (int i = 0; i < args.length; i++) directories[i] = new File (args[i]); // Create the pipeline that will take as input {data = File, target = String for classname} // and turn them into {data = FeatureVector, target = Label} Pipe instancePipe = new SerialPipes (new Pipe[] { new Target2Label (), // Target String -> class label new Input2CharSequence (), // Data File -> String containing contents new CharSubsequence (CharSubsequence.SKIP_HEADER), // Remove UseNet or email header new CharSequence2TokenSequence (), // Data String -> TokenSequence new TokenSequenceLowercase (), // TokenSequence words lowercased new TokenSequenceRemoveStopwords (),// Remove stopwords from sequence new TokenSequence2FeatureSequence(),// Replace each Token with a feature index new FeatureSequence2FeatureVector(),// Collapse word order into a "feature vector" new PrintInputAndTarget(), }); // Create an empty list of the training instances InstanceList ilist = new InstanceList (instancePipe); // Add all the files in the directories to the list of instances. // The Instance that goes into the beginning of the instancePipe // will have a File in the "data" slot, and a string from args[] in the "target" slot. ilist.addThruPipe (new FileIterator (directories, FileIterator.STARTING_DIRECTORIES)); // Make a test/train split; ilists[0] will be for training; ilists[1] will be for testing InstanceList[] ilists = ilist.split (new double[] {.5, .5}); // Create a classifier trainer, and use it to create a classifier ClassifierTrainer naiveBayesTrainer = new NaiveBayesTrainer (); Classifier classifier = naiveBayesTrainer.train (ilists[0]); System.out.println ("The training accuracy is "+ classifier.getAccuracy (ilists[0])); System.out.println ("The testing accuracy is "+ classifier.getAccuracy (ilists[1])); } }
3,004
41.323944
93
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tests/TestMaxEntTrainer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify.tests; import junit.framework.*; import java.net.URI; import cc.mallet.classify.*; import cc.mallet.optimize.Optimizable; import cc.mallet.optimize.tests.TestOptimizable; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.ArrayIterator; import cc.mallet.types.*; import cc.mallet.util.*; public class TestMaxEntTrainer extends TestCase { public TestMaxEntTrainer (String name) { super (name); } private static Alphabet dictOfSize (int size) { Alphabet ret = new Alphabet (); for (int i = 0; i < size; i++) ret.lookupIndex ("feature"+i); return ret; } public void testSetGetParameters () { MaxEntTrainer trainer = new MaxEntTrainer(); Alphabet fd = dictOfSize (6); String[] classNames = new String[] {"class0", "class1", "class2"}; InstanceList ilist = new InstanceList (new Randoms(1), fd, classNames, 20); Optimizable.ByGradientValue maxable = trainer.getOptimizable (ilist); TestOptimizable.testGetSetParameters (maxable); } public void testRandomMaximizable () { MaxEntTrainer trainer = new MaxEntTrainer(); Alphabet fd = dictOfSize (6); String[] classNames = new String[] {"class0", "class1"}; InstanceList ilist = new InstanceList (new Randoms(1), fd, classNames, 20); Optimizable.ByGradientValue maxable = trainer.getOptimizable (ilist); TestOptimizable.testValueAndGradient (maxable); } // TODO This doesn't pass, but it didn't in the old MALLET either. Why?? -akm 1/08 public void testTrainedMaximizable () { MaxEntTrainer trainer = new MaxEntTrainer(); Alphabet fd = dictOfSize (6); String[] classNames = new String[] {"class0", "class1"}; InstanceList ilist = new InstanceList (new Randoms(1), fd, classNames, 20); MaxEnt me = (MaxEnt)trainer.train(ilist); Optimizable.ByGradientValue maxable = trainer.getOptimizable (ilist, me); TestOptimizable.testValueAndGradientCurrentParameters (maxable); } public static Test suite () { return new TestSuite (TestMaxEntTrainer.class); } protected void setUp () { } public static void main (String[] args) { junit.textui.TestRunner.run (suite()); } }
2,652
28.153846
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tests/TestStaticParameters.java
package cc.mallet.classify.tests; import junit.framework.Test; import junit.framework.TestCase; import junit.framework.TestSuite; public class TestStaticParameters extends TestCase { int gamma = 1; public TestStaticParameters () { } public static class Factory { protected static int gamma = 2; public TestStaticParameters newTSP () { System.out.println ("Factory gamma="+this.gamma); TestStaticParameters t = new TestStaticParameters(); t.gamma = this.gamma; return t; } } public void testParameterSetting () { Factory f = new Factory () {{gamma=3;}}; TestStaticParameters g = f.newTSP(); System.out.println ("g.gamma="+g.gamma); assertTrue("gamma="+g.gamma, g.gamma == 3); } public static Test suite () { return new TestSuite (TestClassifiers.class); } protected void setUp () { } public static void main (String[] args) { junit.textui.TestRunner.run (suite()); } }
926
20.068182
55
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tests/TestClassifiers.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify.tests; //import edu.umass.cs.mallet.base.pipe.SerialPipe; import junit.framework.*; import java.net.URI; import java.util.Iterator; import cc.mallet.classify.*; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.ArrayIterator; import cc.mallet.pipe.iterator.PipeInputIterator; import cc.mallet.pipe.iterator.RandomTokenSequenceIterator; import cc.mallet.types.*; import cc.mallet.util.*; public class TestClassifiers extends TestCase { public TestClassifiers (String name) { super (name); } private static Alphabet dictOfSize (int size) { Alphabet ret = new Alphabet (); for (int i = 0; i < size; i++) ret.lookupIndex ("feature"+i); return ret; } public void testRandomTrained () { ClassifierTrainer[] trainers = new ClassifierTrainer[1]; //trainers[0] = new NaiveBayesTrainer(); trainers[0] = new MaxEntTrainer(); //trainers[2] = new DecisionTreeTrainer(); Alphabet fd = dictOfSize (3); String[] classNames = new String[] {"class0", "class1", "class2"}; InstanceList ilist = new InstanceList (new Randoms(1), fd, classNames, 200); InstanceList lists[] = ilist.split (new java.util.Random(2), new double[] {.5, .5}); //System.out.println ("Training set size = "+lists[0].size()); //System.out.println ("Testing set size = "+lists[1].size()); Classifier[] classifiers = new Classifier[trainers.length]; for (int i = 0; i < trainers.length; i++) classifiers[i] = trainers[i].train (lists[0]); System.out.println ("Accuracy on training set:"); for (int i = 0; i < trainers.length; i++) System.out.println (classifiers[i].getClass().getName() + ": " + new Trial (classifiers[i], lists[0]).getAccuracy()); System.out.println ("Accuracy on testing set:"); for (int i = 0; i < trainers.length; i++) System.out.println (classifiers[i].getClass().getName() + ": " + new Trial (classifiers[i], lists[1]).getAccuracy()); } public void testNewFeatures () { ClassifierTrainer[] trainers = new ClassifierTrainer[1]; trainers[0] = new MaxEntTrainer(); Alphabet fd = dictOfSize (3); String[] classNames = new String[] {"class0", "class1", "class2"}; Randoms r = new Randoms(1); InstanceList training = new InstanceList (r, fd, classNames, 50); expandDict (fd, 25); Classifier[] classifiers = new Classifier[trainers.length]; for (int i = 0; i < trainers.length; i++) classifiers[i] = trainers[i].train (training); System.out.println ("Accuracy on training set:"); for (int i = 0; i < trainers.length; i++) System.out.println (classifiers[i].getClass().getName() + ": " + new Trial (classifiers[i], training).getAccuracy()); InstanceList testing = new InstanceList (training.getPipe ()); Iterator<Instance> iter = new RandomTokenSequenceIterator ( r, new Dirichlet (fd, 2.0), 30, 0, 10, 50, classNames); testing.addThruPipe (iter); for (int i = 0; i < testing.size (); i++) { Instance inst = testing.get (i); System.out.println ("DATA:"+inst.getData()); } System.out.println ("Accuracy on testing set:"); for (int i = 0; i < trainers.length; i++) System.out.println (classifiers[i].getClass().getName() + ": " + new Trial (classifiers[i], testing).getAccuracy()); } private void expandDict (Alphabet fd, int size) { fd.startGrowth (); for (int i = 0; i < size; i++) fd.lookupIndex ("feature"+i, true); } public static Test suite () { return new TestSuite (TestClassifiers.class); } protected void setUp () { } public static void main (String[] args) { junit.textui.TestRunner.run (suite()); } }
4,300
29.942446
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tests/TestNaiveBayes.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify.tests; import junit.framework.*; import java.net.URI; import java.io.File; import cc.mallet.classify.*; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.ArrayIterator; import cc.mallet.pipe.iterator.FileIterator; import cc.mallet.types.*; import cc.mallet.util.*; public class TestNaiveBayes extends TestCase { public TestNaiveBayes (String name) { super (name); } public void testNonTrained () { Alphabet fdict = new Alphabet (); System.out.println ("fdict.size="+fdict.size()); LabelAlphabet ldict = new LabelAlphabet (); Multinomial.Estimator me1 = new Multinomial.LaplaceEstimator (fdict); Multinomial.Estimator me2 = new Multinomial.LaplaceEstimator (fdict); // Prior ldict.lookupIndex ("sports"); ldict.lookupIndex ("politics"); ldict.stopGrowth (); System.out.println ("ldict.size="+ldict.size()); Multinomial prior = new Multinomial (new double[] {.5, .5}, ldict); // Sports me1.increment ("win", 5); me1.increment ("puck", 5); me1.increment ("team", 5); System.out.println ("fdict.size="+fdict.size()); // Politics me2.increment ("win", 5); me2.increment ("speech", 5); me2.increment ("vote", 5); Multinomial sports = me1.estimate(); Multinomial politics = me2.estimate(); // We must estimate from me1 and me2 after all data is incremented, // so that the "sports" multinomial knows the full dictionary size! Classifier c = new NaiveBayes (new Noop (fdict, ldict), prior, new Multinomial[] {sports, politics}); Instance inst = c.getInstancePipe().instanceFrom( new Instance (new FeatureVector (fdict, new Object[] {"speech", "win"}, new double[] {1, 1}), ldict.lookupLabel ("politics"), null, null)); System.out.println ("inst.data = "+inst.getData ()); Classification cf = c.classify (inst); LabelVector l = (LabelVector) cf.getLabeling(); //System.out.println ("l.size="+l.size()); System.out.println ("l.getBestIndex="+l.getBestIndex()); assertTrue (cf.getLabeling().getBestLabel() == ldict.lookupLabel("politics")); assertTrue (cf.getLabeling().getBestValue() > 0.6); } public void testStringTrained () { String[] africaTraining = new String[] { "on the plains of africa the lions roar", "in swahili ngoma means to dance", "nelson mandela became president of south africa", "the saraha dessert is expanding"}; String[] asiaTraining = new String[] { "panda bears eat bamboo", "china's one child policy has resulted in a surplus of boys", "tigers live in the jungle"}; InstanceList instances = new InstanceList ( new SerialPipes (new Pipe[] { new Target2Label (), new CharSequence2TokenSequence (), new TokenSequence2FeatureSequence (), new FeatureSequence2FeatureVector ()})); instances.addThruPipe (new ArrayIterator (africaTraining, "africa")); instances.addThruPipe (new ArrayIterator (asiaTraining, "asia")); Classifier c = new NaiveBayesTrainer ().train (instances); Classification cf = c.classify ("nelson mandela never eats lions"); assertTrue (cf.getLabeling().getBestLabel() == ((LabelAlphabet)instances.getTargetAlphabet()).lookupLabel("africa")); } public void testRandomTrained () { InstanceList ilist = new InstanceList (new Randoms(1), 10, 2); Classifier c = new NaiveBayesTrainer ().train (ilist); // test on the training data int numCorrect = 0; for (int i = 0; i < ilist.size(); i++) { Instance inst = ilist.get(i); Classification cf = c.classify (inst); cf.print (); if (cf.getLabeling().getBestLabel() == inst.getLabeling().getBestLabel()) numCorrect++; } System.out.println ("Accuracy on training set = " + ((double)numCorrect)/ilist.size()); } public void testIncrementallyTrainedGrowingAlphabets() { System.out.println("testIncrementallyTrainedGrowingAlphabets"); String[] args = new String[] { "src/cc/mallet/classify/tests/NaiveBayesData/learn/a", "src/cc/mallet/classify/tests/NaiveBayesData/learn/b" }; File[] directories = new File[args.length]; for (int i = 0; i < args.length; i++) directories[i] = new File (args[i]); SerialPipes instPipe = // MALLET pipeline for converting instances to feature vectors new SerialPipes(new Pipe[] { new Target2Label(), new Input2CharSequence(), //SKIP_HEADER only works for Unix //new CharSubsequence(CharSubsequence.SKIP_HEADER), new CharSequence2TokenSequence(), new TokenSequenceLowercase(), new TokenSequenceRemoveStopwords(), new TokenSequence2FeatureSequence(), new FeatureSequence2FeatureVector() }); InstanceList instList = new InstanceList(instPipe); instList.addThruPipe(new FileIterator(directories, FileIterator.STARTING_DIRECTORIES)); System.out.println("Training 1"); NaiveBayesTrainer trainer = new NaiveBayesTrainer(); NaiveBayes classifier = trainer.trainIncremental(instList); //instList.getDataAlphabet().stopGrowth(); // incrementally train... String[] t2directories = { "src/cc/mallet/classify/tests/NaiveBayesData/learn/b" }; System.out.println("data alphabet size " + instList.getDataAlphabet().size()); System.out.println("target alphabet size " + instList.getTargetAlphabet().size()); InstanceList instList2 = new InstanceList(instPipe); instList2.addThruPipe(new FileIterator(t2directories, FileIterator.STARTING_DIRECTORIES)); System.out.println("Training 2"); System.out.println("data alphabet size " + instList2.getDataAlphabet().size()); System.out.println("target alphabet size " + instList2.getTargetAlphabet().size()); NaiveBayes classifier2 = (NaiveBayes) trainer.trainIncremental(instList2); } public void testIncrementallyTrained() { System.out.println("testIncrementallyTrained"); String[] args = new String[] { "src/cc/mallet/classify/tests/NaiveBayesData/learn/a", "src/cc/mallet/classify/tests/NaiveBayesData/learn/b" }; File[] directories = new File[args.length]; for (int i = 0; i < args.length; i++) directories[i] = new File (args[i]); SerialPipes instPipe = // MALLET pipeline for converting instances to feature vectors new SerialPipes(new Pipe[] { new Target2Label(), new Input2CharSequence(), //SKIP_HEADER only works for Unix //new CharSubsequence(CharSubsequence.SKIP_HEADER), new CharSequence2TokenSequence(), new TokenSequenceLowercase(), new TokenSequenceRemoveStopwords(), new TokenSequence2FeatureSequence(), new FeatureSequence2FeatureVector() }); InstanceList instList = new InstanceList(instPipe); instList.addThruPipe(new FileIterator(directories, FileIterator.STARTING_DIRECTORIES)); System.out.println("Training 1"); NaiveBayesTrainer trainer = new NaiveBayesTrainer(); NaiveBayes classifier = (NaiveBayes) trainer.trainIncremental(instList); Classification initialClassification = classifier.classify("Hello Everybody"); Classification initial2Classification = classifier.classify("Goodbye now"); System.out.println("Initial Classification = "); initialClassification.print(); initial2Classification.print(); System.out.println("data alphabet " + classifier.getAlphabet()); System.out.println("label alphabet " + classifier.getLabelAlphabet()); // incrementally train... String[] t2directories = { "src/cc/mallet/classify/tests/NaiveBayesData/learn/b" }; System.out.println("data alphabet size " + instList.getDataAlphabet().size()); System.out.println("target alphabet size " + instList.getTargetAlphabet().size()); InstanceList instList2 = new InstanceList(instPipe); instList2.addThruPipe(new FileIterator(t2directories, FileIterator.STARTING_DIRECTORIES)); System.out.println("Training 2"); System.out.println("data alphabet size " + instList2.getDataAlphabet().size()); System.out.println("target alphabet size " + instList2.getTargetAlphabet().size()); NaiveBayes classifier2 = (NaiveBayes) trainer.trainIncremental(instList2); } public void testEmptyStringBug() { System.out.println("testEmptyStringBug"); String[] args = new String[] { "src/cc/mallet/classify/tests/NaiveBayesData/learn/a", "src/cc/mallet/classify/tests/NaiveBayesData/learn/b" }; File[] directories = new File[args.length]; for (int i = 0; i < args.length; i++) directories[i] = new File (args[i]); SerialPipes instPipe = // MALLET pipeline for converting instances to feature vectors new SerialPipes(new Pipe[] { new Target2Label(), new Input2CharSequence(), //SKIP_HEADER only works for Unix //new CharSubsequence(CharSubsequence.SKIP_HEADER), new CharSequence2TokenSequence(), new TokenSequenceLowercase(), new TokenSequenceRemoveStopwords(), new TokenSequence2FeatureSequence(), new FeatureSequence2FeatureVector() }); InstanceList instList = new InstanceList(instPipe); instList.addThruPipe(new FileIterator(directories, FileIterator.STARTING_DIRECTORIES)); System.out.println("Training 1"); NaiveBayesTrainer trainer = new NaiveBayesTrainer(); NaiveBayes classifier = (NaiveBayes) trainer.trainIncremental(instList); Classification initialClassification = classifier.classify("Hello Everybody"); Classification initial2Classification = classifier.classify("Goodbye now"); System.out.println("Initial Classification = "); initialClassification.print(); initial2Classification.print(); System.out.println("data alphabet " + classifier.getAlphabet()); System.out.println("label alphabet " + classifier.getLabelAlphabet()); // test String[] t2directories = { "src/cc/mallet/classify/tests/NaiveBayesData/learn/b" }; System.out.println("data alphabet size " + instList.getDataAlphabet().size()); System.out.println("target alphabet size " + instList.getTargetAlphabet().size()); InstanceList instList2 = new InstanceList(instPipe); instList2.addThruPipe(new FileIterator(t2directories, FileIterator.STARTING_DIRECTORIES, true)); System.out.println("Training 2"); System.out.println("data alphabet size " + instList2.getDataAlphabet().size()); System.out.println("target alphabet size " + instList2.getTargetAlphabet().size()); NaiveBayes classifier2 = (NaiveBayes) trainer.trainIncremental(instList2); Classification secondClassification = classifier.classify("Goodbye now"); secondClassification.print(); } static Test suite () { return new TestSuite (TestNaiveBayes.class); //TestSuite suite= new TestSuite(); // //suite.addTest(new TestNaiveBayes("testIncrementallyTrained")); // suite.addTest(new TestNaiveBayes("testEmptyStringBug")); // return suite; } protected void setUp () { } public static void main (String[] args) { junit.textui.TestRunner.run (suite()); } }
11,366
32.72997
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/Vectors2Classify.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify.tui; import java.io.*; import java.util.*; import java.util.Random; import java.util.logging.*; import java.lang.reflect.*; import cc.mallet.classify.*; import cc.mallet.classify.evaluate.*; import cc.mallet.types.*; import cc.mallet.util.*; /** * Classify documents, run trials, print statistics from a vector file. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public abstract class Vectors2Classify { private static Logger logger = MalletLogger.getLogger(Vectors2Classify.class.getName()); private static Logger progressLogger = MalletProgressMessageLogger.getLogger(Vectors2Classify.class.getName() + "-pl"); private static ArrayList<ClassifierTrainer> classifierTrainers = new ArrayList<ClassifierTrainer>(); private static boolean[][] ReportOptions = new boolean[3][4]; private static String[][] ReportOptionArgs = new String[3][4]; //arg in dataset:reportOption=arg // Essentially an enum mapping string names to enums to ints. private static class ReportOption { static final String[] dataOptions = {"train", "test", "validation"}; static final String[] reportOptions = {"accuracy", "f1", "confusion", "raw"}; static final int train=0; static final int test =1; static final int validation=2; static final int accuracy=0; static final int f1=1; static final int confusion=2; static final int raw=3; } static CommandOption.SpacedStrings report = new CommandOption.SpacedStrings (Vectors2Classify.class, "report", "[train|test|validation]:[accuracy|f1:label|confusion|raw]", true, new String[] {"test:accuracy", "test:confusion", "train:accuracy"}, "", null) { public void postParsing (CommandOption.List list) { java.lang.String defaultRawFormatting = "siw"; for (int argi=0; argi<this.value.length; argi++){ // convert options like --report train:accuracy --report test:f1=labelA to // boolean array of options. // first, split the argument at semicolon. //System.out.println(argi + " " + this.value[argi]); java.lang.String arg = this.value[argi]; java.lang.String fields[] = arg.split("[:=]"); java.lang.String dataSet = fields[0]; java.lang.String reportOption = fields[1]; java.lang.String reportOptionArg = null; if (fields.length >=3){ reportOptionArg = fields[2]; } //System.out.println("Report option arg " + reportOptionArg); //find the datasource (test,train,validation) boolean foundDataSource = false; int i=0; for (; i<ReportOption.dataOptions.length; i++){ if (dataSet.equals(ReportOption.dataOptions[i])){ foundDataSource = true; break; } } if (!foundDataSource){ throw new IllegalArgumentException("Unknown argument = " + dataSet + " in --report " + this.value[argi]); } //find the report option (accuracy, f1, confusion, raw) boolean foundReportOption = false; int j=0; for (; j<ReportOption.reportOptions.length; j++){ if (reportOption.equals(ReportOption.reportOptions[j])){ foundReportOption = true; break; } } if (!foundReportOption){ throw new IllegalArgumentException("Unknown argument = " + reportOption + " in --report " + this.value[argi]); } //Mark the (dataSet,reportionOption) pair as selected ReportOptions[i][j] = true; if (j == ReportOption.f1){ // make sure a label was specified for f1 if (reportOptionArg == null){ throw new IllegalArgumentException("F1 must have label argument in --report " + this.value[argi]); } // Pass through the string argument ReportOptionArgs[i][j]= reportOptionArg; }else if (reportOptionArg != null){ throw new IllegalArgumentException("No arguments after = allowed in --report " + this.value[argi]); } } } }; static CommandOption.Object trainerConstructor = new CommandOption.Object (Vectors2Classify.class, "trainer", "ClassifierTrainer constructor", true, new NaiveBayesTrainer(), "Java code for the constructor used to create a ClassifierTrainer. "+ "If no '(' appears, then \"new \" will be prepended and \"Trainer()\" will be appended."+ "You may use this option mutiple times to compare multiple classifiers.", null) { public void parseArg (java.lang.String arg) { // parse something like Maxent,gaussianPriorVariance=10,numIterations=20 //System.out.println("Arg = " + arg); // first, split the argument at commas. java.lang.String fields[] = arg.split(","); //Massage constructor name, so that MaxEnt, MaxEntTrainer, new MaxEntTrainer() // all call new MaxEntTrainer() java.lang.String constructorName = fields[0]; if (constructorName.indexOf('(') != -1) // if contains (), pass it though super.parseArg(arg); else { if (constructorName.endsWith("Trainer")){ super.parseArg("new " + constructorName + "()"); // add parens if they forgot }else{ super.parseArg("new "+constructorName+"Trainer()"); // make trainer name from classifier name } } // find methods associated with the class we just built Method methods[] = this.value.getClass().getMethods(); // find setters corresponding to parameter names. for (int i=1; i<fields.length; i++){ java.lang.String nameValuePair[] = fields[i].split("="); java.lang.String parameterName = nameValuePair[0]; java.lang.String parameterValue = nameValuePair[1]; //todo: check for val present! java.lang.Object parameterValueObject; try { parameterValueObject = getInterpreter().eval(parameterValue); } catch (bsh.EvalError e) { throw new IllegalArgumentException ("Java interpreter eval error on parameter "+ parameterName + "\n"+e); } boolean foundSetter = false; for (int j=0; j<methods.length; j++){ // System.out.println("method " + j + " name is " + methods[j].getName()); // System.out.println("set" + Character.toUpperCase(parameterName.charAt(0)) + parameterName.substring(1)); if ( ("set" + Character.toUpperCase(parameterName.charAt(0)) + parameterName.substring(1)).equals(methods[j].getName()) && methods[j].getParameterTypes().length == 1){ // System.out.println("Matched method " + methods[j].getName()); // Class[] ptypes = methods[j].getParameterTypes(); // System.out.println("Parameter types:"); // for (int k=0; k<ptypes.length; k++){ // System.out.println("class " + k + " = " + ptypes[k].getName()); // } try { java.lang.Object[] parameterList = new java.lang.Object[]{parameterValueObject}; // System.out.println("Argument types:"); // for (int k=0; k<parameterList.length; k++){ // System.out.println("class " + k + " = " + parameterList[k].getClass().getName()); // } methods[j].invoke(this.value, parameterList); } catch ( IllegalAccessException e) { System.out.println("IllegalAccessException " + e); throw new IllegalArgumentException ("Java access error calling setter\n"+e); } catch ( InvocationTargetException e) { System.out.println("IllegalTargetException " + e); throw new IllegalArgumentException ("Java target error calling setter\n"+e); } foundSetter = true; break; } } if (!foundSetter){ System.out.println("Parameter " + parameterName + " not found on trainer " + constructorName); System.out.println("Available parameters for " + constructorName); for (int j=0; j<methods.length; j++){ if ( methods[j].getName().startsWith("set") && methods[j].getParameterTypes().length == 1){ System.out.println(Character.toLowerCase(methods[j].getName().charAt(3)) + methods[j].getName().substring(4)); } } throw new IllegalArgumentException ("no setter found for parameter " + parameterName); } } } public void postParsing (CommandOption.List list) { assert (this.value instanceof ClassifierTrainer); //System.out.println("v2c PostParsing " + this.value); classifierTrainers.add ((ClassifierTrainer)this.value); } }; static CommandOption.String outputFile = new CommandOption.String (Vectors2Classify.class, "output-classifier", "FILENAME", true, "classifier.mallet", "The filename in which to write the classifier after it has been trained.", null); /* static CommandOption.String pipeFile = new CommandOption.String (Vectors2Classify.class, "output-pipe", "FILENAME", true, "classifier_pipe.mallet", "The filename in which to write the classifier's instancePipe after it has been trained.", null);*/ static CommandOption.String inputFile = new CommandOption.String (Vectors2Classify.class, "input", "FILENAME", true, "text.vectors", "The filename from which to read the list of training instances. Use - for stdin.", null); static CommandOption.String trainingFile = new CommandOption.String (Vectors2Classify.class, "training-file", "FILENAME", true, "text.vectors", "Read the training set instance list from this file. " + "If this is specified, the input file parameter is ignored", null); static CommandOption.String testFile = new CommandOption.String (Vectors2Classify.class, "testing-file", "FILENAME", true, "text.vectors", "Read the test set instance list to this file. " + "If this option is specified, the training-file parameter must be specified and " + " the input-file parameter is ignored", null); static CommandOption.String validationFile = new CommandOption.String (Vectors2Classify.class, "validation-file", "FILENAME", true, "text.vectors", "Read the validation set instance list to this file." + "If this option is specified, the training-file parameter must be specified and " + "the input-file parameter is ignored", null); static CommandOption.Double trainingProportionOption = new CommandOption.Double (Vectors2Classify.class, "training-portion", "DECIMAL", true, 1.0, "The fraction of the instances that should be used for training.", null); static CommandOption.Double validationProportionOption = new CommandOption.Double (Vectors2Classify.class, "validation-portion", "DECIMAL", true, 0.0, "The fraction of the instances that should be used for validation.", null); static CommandOption.Double unlabeledProportionOption = new CommandOption.Double (Vectors2Classify.class, "unlabeled-portion", "DECIMAL", true, 0.0, "The fraction of the training instances that should have their labels hidden. " +"Note that these are taken out of the training-portion, not allocated separately.", null); static CommandOption.Integer randomSeedOption = new CommandOption.Integer (Vectors2Classify.class, "random-seed", "INTEGER", true, 0, "The random seed for randomly selecting a proportion of the instance list for training", null); static CommandOption.Integer numTrialsOption = new CommandOption.Integer (Vectors2Classify.class, "num-trials", "INTEGER", true, 1, "The number of random train/test splits to perform", null); static CommandOption.Object classifierEvaluatorOption = new CommandOption.Object (Vectors2Classify.class, "classifier-evaluator", "CONSTRUCTOR", true, null, "Java code for constructing a ClassifierEvaluating object", null); // static CommandOption.Boolean printTrainAccuracyOption = new CommandOption.Boolean // (Vectors2Classify.class, "print-train-accuracy", "true|false", true, true, // "After training, run the resulting classifier on the instances included in training, " // +"and print the accuracy", null); // // static CommandOption.Boolean printTestAccuracyOption = new CommandOption.Boolean // (Vectors2Classify.class, "print-test-accuracy", "true|false", true, true, // "After training, run the resulting classifier on the instances not included in training, " // +"and print the accuracy", null); static CommandOption.Integer verbosityOption = new CommandOption.Integer (Vectors2Classify.class, "verbosity", "INTEGER", true, -1, "The level of messages to print: 0 is silent, 8 is most verbose. " + "Levels 0-8 correspond to the java.logger predefined levels "+ "off, severe, warning, info, config, fine, finer, finest, all. " + "The default value is taken from the mallet logging.properties file," + " which currently defaults to INFO level (3)", null); static CommandOption.Boolean noOverwriteProgressMessagesOption = new CommandOption.Boolean (Vectors2Classify.class, "noOverwriteProgressMessages", "true|false", false, false, "Suppress writing-in-place on terminal for progess messages - repetitive messages " +"of which only the latest is generally of interest", null); public static void main (String[] args) throws bsh.EvalError, java.io.IOException { // Process the command-line options CommandOption.setSummary (Vectors2Classify.class, "A tool for training, saving and printing diagnostics from a classifier on vectors."); CommandOption.process (Vectors2Classify.class, args); // handle default trainer here for now; default argument processing doesn't work if (!trainerConstructor.wasInvoked()){ classifierTrainers.add (new NaiveBayesTrainer()); } if (!report.wasInvoked()){ report.postParsing(null); // force postprocessing of default value } int verbosity = verbosityOption.value; Logger rootLogger = ((MalletLogger)progressLogger).getRootLogger(); if (verbosityOption.wasInvoked()){ rootLogger.setLevel( MalletLogger.LoggingLevels[verbosity]); } if (noOverwriteProgressMessagesOption.value == false){ // install special formatting for progress messages // find console handler on root logger; change formatter to one // that knows about progress messages Handler[] handlers = rootLogger.getHandlers(); for (int i = 0; i < handlers.length; i++) { if (handlers[i] instanceof ConsoleHandler) { handlers[i].setFormatter(new ProgressMessageLogFormatter()); } } } boolean separateIlists = testFile.wasInvoked() || trainingFile.wasInvoked() || validationFile.wasInvoked(); InstanceList ilist=null; InstanceList testFileIlist=null; InstanceList trainingFileIlist=null; InstanceList validationFileIlist=null; if (!separateIlists) { // normal case, --input-file specified // Read in the InstanceList, from stdin if the input filename is "-". ilist = InstanceList.load (new File(inputFile.value)); } else{ // user specified separate files for testing and training sets. trainingFileIlist = InstanceList.load (new File(trainingFile.value)); logger.info("Training vectors loaded from " + trainingFile.value); if (testFile.wasInvoked()){ testFileIlist = InstanceList.load (new File(testFile.value)); logger.info("Testing vectors loaded from " + testFile.value); if (!testFileIlist.getPipe().alphabetsMatch(trainingFileIlist.getPipe())) { throw new RuntimeException( trainingFileIlist.getPipe().getDataAlphabet() + "\n" + testFileIlist.getPipe().getDataAlphabet() + "\n" + trainingFileIlist.getPipe().getTargetAlphabet() + "\n" + testFileIlist.getPipe().getTargetAlphabet() + "\n" + "Training and testing alphabets don't match!\n"); } } if (validationFile.wasInvoked()){ validationFileIlist = InstanceList.load (new File(validationFile.value)); logger.info("validation vectors loaded from " + validationFile.value); if (!validationFileIlist.getPipe().alphabetsMatch(trainingFileIlist.getPipe())) { throw new RuntimeException( trainingFileIlist.getPipe().getDataAlphabet() + "\n" + validationFileIlist.getPipe().getDataAlphabet() + "\n" + trainingFileIlist.getPipe().getTargetAlphabet() + "\n" + validationFileIlist.getPipe().getTargetAlphabet() + "\n" + "Training and validation alphabets don't match!\n"); } } else { validationFileIlist = new InstanceList(new cc.mallet.pipe.Noop()); } } int numTrials = numTrialsOption.value; Random r = randomSeedOption.wasInvoked() ? new Random (randomSeedOption.value) : new Random (); ClassifierTrainer[] trainers = new ClassifierTrainer[classifierTrainers.size()]; for (int i = 0; i < classifierTrainers.size(); i++) { trainers[i] = classifierTrainers.get(i); logger.fine ("Trainer specified = "+trainers[i].toString()); } double trainAccuracy[][] = new double[trainers.length][numTrials]; double testAccuracy[][] = new double[trainers.length][numTrials]; double validationAccuracy[][] = new double[trainers.length][numTrials]; String trainConfusionMatrix[][] = new String[trainers.length][numTrials]; String testConfusionMatrix[][] = new String[trainers.length][numTrials]; String validationConfusionMatrix[][] = new String[trainers.length][numTrials]; double t = trainingProportionOption.value; double v = validationProportionOption.value; if (!separateIlists) { logger.info("Training portion = " + t); logger.info(" Unlabeled training sub-portion = "+unlabeledProportionOption.value); logger.info("Validation portion = " + v); logger.info("Testing portion = " + (1 - v - t)); } // for (int i=0; i<3; i++){ // for (int j=0; j<4; j++){ // System.out.print(" " + ReportOptions[i][j]); // } // System.out.println(); // } for (int trialIndex = 0; trialIndex < numTrials; trialIndex++) { System.out.println("\n-------------------- Trial " + trialIndex + " --------------------\n"); InstanceList[] ilists; BitSet unlabeledIndices = null; if (!separateIlists){ ilists = ilist.split (r, new double[] {t, 1-t-v, v}); } else { ilists = new InstanceList[3]; ilists[0] = trainingFileIlist; ilists[1] = testFileIlist; ilists[2] = validationFileIlist; } if (unlabeledProportionOption.value > 0) unlabeledIndices = new cc.mallet.util.Randoms(r.nextInt()) .nextBitSet(ilists[0].size(), unlabeledProportionOption.value); //InfoGain ig = new InfoGain (ilists[0]); //int igl = Math.min (10, ig.numLocations()); //for (int i = 0; i < igl; i++) //System.out.println ("InfoGain["+ig.getObjectAtRank(i)+"]="+ig.getValueAtRank(i)); //ig.print(); //FeatureSelection selectedFeatures = new FeatureSelection (ig, 8000); //ilists[0].setFeatureSelection (selectedFeatures); //OddsRatioFeatureInducer orfi = new OddsRatioFeatureInducer (ilists[0]); //orfi.induceFeatures (ilists[0], false, true); //System.out.println ("Training with "+ilists[0].size()+" instances"); long time[] = new long[trainers.length]; for (int c = 0; c < trainers.length; c++){ time[c] = System.currentTimeMillis(); System.out.println ("Trial " + trialIndex + " Training " + trainers[c].toString() + " with "+ilists[0].size()+" instances"); if (unlabeledProportionOption.value > 0) ilists[0].hideSomeLabels(unlabeledIndices); trainers[c].setValidationInstances(ilists[2]); Classifier classifier = trainers[c].train (ilists[0]); if (unlabeledProportionOption.value > 0) ilists[0].unhideAllLabels(); System.out.println ("Trial " + trialIndex + " Training " + trainers[c].toString() + " finished"); time[c] = System.currentTimeMillis() - time[c]; Trial trainTrial = new Trial (classifier, ilists[0]); //assert (ilists[1].size() > 0); Trial testTrial = new Trial (classifier, ilists[1]); Trial validationTrial = new Trial(classifier, ilists[2]); // gdruck - only perform evaluation if requested in report options if (ReportOptions[ReportOption.train][ReportOption.confusion] && ilists[0].size()>0) trainConfusionMatrix[c][trialIndex] = new ConfusionMatrix (trainTrial).toString(); if (ReportOptions[ReportOption.test][ReportOption.confusion] && ilists[1].size()>0) testConfusionMatrix[c][trialIndex] = new ConfusionMatrix (testTrial).toString(); if (ReportOptions[ReportOption.validation][ReportOption.confusion] && ilists[2].size()>0) validationConfusionMatrix[c][trialIndex] = new ConfusionMatrix (validationTrial).toString(); // gdruck - only perform evaluation if requested in report options if (ReportOptions[ReportOption.train][ReportOption.accuracy]) trainAccuracy[c][trialIndex] = trainTrial.getAccuracy(); if (ReportOptions[ReportOption.test][ReportOption.accuracy]) testAccuracy[c][trialIndex] = testTrial.getAccuracy(); if (ReportOptions[ReportOption.validation][ReportOption.accuracy]) validationAccuracy[c][trialIndex] = validationTrial.getAccuracy(); if (outputFile.wasInvoked()) { String filename = outputFile.value; if (trainers.length > 1) filename = filename+trainers[c].toString(); if (numTrials > 1) filename = filename+".trial"+trialIndex; try { ObjectOutputStream oos = new ObjectOutputStream (new FileOutputStream (filename)); oos.writeObject (classifier); oos.close(); } catch (Exception e) { e.printStackTrace(); throw new IllegalArgumentException ("Couldn't write classifier to filename "+ filename); } } // New Reporting // raw output if (ReportOptions[ReportOption.train][ReportOption.raw]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString()); System.out.println(" Raw Training Data"); printTrialClassification(trainTrial); } if (ReportOptions[ReportOption.test][ReportOption.raw]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString()); System.out.println(" Raw Testing Data"); printTrialClassification(testTrial); } if (ReportOptions[ReportOption.validation][ReportOption.raw]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString()); System.out.println(" Raw Validation Data"); printTrialClassification(validationTrial); } //train if (ReportOptions[ReportOption.train][ReportOption.confusion]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " Training Data Confusion Matrix"); if (ilists[0].size()>0) System.out.println (trainConfusionMatrix[c][trialIndex]); } if (ReportOptions[ReportOption.train][ReportOption.accuracy]){ System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " training data accuracy= "+ trainAccuracy[c][trialIndex]); } if (ReportOptions[ReportOption.train][ReportOption.f1]){ String label = ReportOptionArgs[ReportOption.train][ReportOption.f1]; System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " training data F1(" + label + ") = "+ trainTrial.getF1(label)); } //validation if (ReportOptions[ReportOption.validation][ReportOption.confusion]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " Validation Data Confusion Matrix"); if (ilists[2].size()>0) System.out.println (validationConfusionMatrix[c][trialIndex]); } if (ReportOptions[ReportOption.validation][ReportOption.accuracy]){ System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " validation data accuracy= "+ validationAccuracy[c][trialIndex]); } if (ReportOptions[ReportOption.validation][ReportOption.f1]){ String label = ReportOptionArgs[ReportOption.validation][ReportOption.f1]; System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " validation data F1(" + label + ") = "+ validationTrial.getF1(label)); } //test if (ReportOptions[ReportOption.test][ReportOption.confusion]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " Test Data Confusion Matrix"); if (ilists[1].size()>0) System.out.println (testConfusionMatrix[c][trialIndex]); } if (ReportOptions[ReportOption.test][ReportOption.accuracy]){ System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " test data accuracy= "+ testAccuracy[c][trialIndex]); } if (ReportOptions[ReportOption.test][ReportOption.f1]){ String label = ReportOptionArgs[ReportOption.test][ReportOption.f1]; System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " test data F1(" + label + ") = "+ testTrial.getF1(label)); } } // end for each trainer } // end for each trial // New reporting //"[train|test|validation]:[accuracy|f1|confusion|raw]" for (int c=0; c < trainers.length; c++) { System.out.println ("\n"+trainers[c].toString()); if (ReportOptions[ReportOption.train][ReportOption.accuracy]) System.out.println ("Summary. train accuracy mean = "+ MatrixOps.mean (trainAccuracy[c])+ " stddev = "+ MatrixOps.stddev (trainAccuracy[c])+ " stderr = "+ MatrixOps.stderr (trainAccuracy[c])); if (ReportOptions[ReportOption.validation][ReportOption.accuracy]) System.out.println ("Summary. validation accuracy mean = "+ MatrixOps.mean (validationAccuracy[c])+ " stddev = "+ MatrixOps.stddev (validationAccuracy[c])+ " stderr = "+ MatrixOps.stderr (validationAccuracy[c])); if (ReportOptions[ReportOption.test][ReportOption.accuracy]) System.out.println ("Summary. test accuracy mean = "+ MatrixOps.mean (testAccuracy[c])+ " stddev = "+ MatrixOps.stddev (testAccuracy[c])+ " stderr = "+ MatrixOps.stderr (testAccuracy[c])); } // end for each trainer } private static void printTrialClassification(Trial trial) { for (Classification c : trial) { Instance instance = c.getInstance(); System.out.print(instance.getName() + " " + instance.getTarget() + " "); Labeling labeling = c.getLabeling(); for (int j = 0; j < labeling.numLocations(); j++){ System.out.print(labeling.getLabelAtRank(j).toString() + ":" + labeling.getValueAtRank(j) + " "); } System.out.println(); } } }
26,707
43.513333
159
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/Vectors2Vectors.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify.tui; import java.util.logging.*; import java.util.Iterator; import java.util.Random; import java.util.BitSet; import java.util.ArrayList; import java.util.Collections; import java.io.*; import cc.mallet.classify.*; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.*; import cc.mallet.types.*; import cc.mallet.util.*; /** A command-line tool for manipulating InstanceLists. For example, reducing the feature space by information gain. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class Vectors2Vectors { private static Logger logger = MalletLogger.getLogger(Vectors2Vectors.class.getName()); static CommandOption.File inputFile = new CommandOption.File (Vectors2Vectors.class, "input", "FILE", true, new File("-"), "Read the instance list from this file; Using - indicates stdin.", null); static CommandOption.File outputFile = new CommandOption.File (Vectors2Vectors.class, "output", "FILE", true, new File("-"), "Write pruned instance list to this file (use --training-file etc. if you are splitting the list). Using - indicates stdin.", null); static CommandOption.File trainingFile = new CommandOption.File (Vectors2Vectors.class, "training-file", "FILE", true, new File("training.vectors"), "Write the training set instance list to this file (or use --output if you are only pruning features); Using - indicates stdout.", null); static CommandOption.File testFile = new CommandOption.File (Vectors2Vectors.class, "testing-file", "FILE", true, new File("test.vectors"), "Write the test set instance list to this file; Using - indicates stdout.", null); static CommandOption.File validationFile = new CommandOption.File (Vectors2Vectors.class, "validation-file", "FILE", true, new File("validation.vectors"), "Write the validation set instance list to this file; Using - indicates stdout.", null); static CommandOption.Double trainingProportion = new CommandOption.Double (Vectors2Vectors.class, "training-portion", "DECIMAL", true, 1.0, "The fraction of the instances that should be used for training.", null); static CommandOption.Double validationProportion = new CommandOption.Double (Vectors2Vectors.class, "validation-portion", "DECIMAL", true, 0.0, "The fraction of the instances that should be used for validation.", null); static CommandOption.Integer randomSeed = new CommandOption.Integer (Vectors2Vectors.class, "random-seed", "INTEGER", true, 0, "The random seed for randomly selecting a proportion of the instance list for training", null); static CommandOption.Integer pruneInfogain = new CommandOption.Integer (Vectors2Vectors.class, "prune-infogain", "N", false, 0, "Reduce features to the top N by information gain.", null); static CommandOption.Integer pruneCount = new CommandOption.Integer (Vectors2Vectors.class, "prune-count", "N", false, 0, "Reduce features to those that occur more than N times.", null); static CommandOption.Boolean vectorToSequence = new CommandOption.Boolean (Vectors2Vectors.class, "vector-to-sequence", "[TRUE|FALSE]", false, false, "Convert FeatureVector's to FeatureSequence's.", null); static CommandOption.Boolean hideTargets = new CommandOption.Boolean (Vectors2Vectors.class, "hide-targets", "[TRUE|FALSE]", false, false, "Hide targets.", null); static CommandOption.Boolean revealTargets = new CommandOption.Boolean (Vectors2Vectors.class, "reveal-targets", "[TRUE|FALSE]", false, false, "Reveal targets.", null); public static void main (String[] args) throws FileNotFoundException, IOException { // Process the command-line options CommandOption.setSummary (Vectors2Vectors.class, "A tool for manipulating instance lists of feature vectors."); CommandOption.process (Vectors2Vectors.class, args); // Print some helpful messages for error cases if (args.length == 0) { CommandOption.getList(Vectors2Vectors.class).printUsage(false); System.exit (-1); } Random r = randomSeed.wasInvoked() ? new Random (randomSeed.value) : new Random (); double t = trainingProportion.value; double v = validationProportion.value; logger.info ("Training portion = "+t); logger.info ("Validation portion = "+v); logger.info ("Testing portion = "+(1-v-t)); logger.info ("Prune info gain = "+pruneInfogain.value); logger.info ("Prune count = "+pruneCount.value); // Read the InstanceList InstanceList instances = InstanceList.load (inputFile.value); if (t == 1.0 && !vectorToSequence.value && ! (pruneInfogain.wasInvoked() || pruneCount.wasInvoked()) && ! (hideTargets.wasInvoked() || revealTargets.wasInvoked())) { logger.warning("Vectors2Vectors was invoked, but did not change anything"); instances.save(trainingFile.value()); System.exit(0); } if (pruneInfogain.wasInvoked() || pruneCount.wasInvoked()) { // Are we also splitting the instances? // Current code doesn't want to do this, so I'm // not changing it, but I don't know a reason. -DM if (t != 1.0) { throw new UnsupportedOperationException("Infogain/count processing of test or validation lists not yet supported."); } if (pruneCount.value > 0) { // Check which type of data element the instances contain Instance firstInstance = instances.get(0); if (firstInstance.getData() instanceof FeatureSequence) { // Version for feature sequences Alphabet oldAlphabet = instances.getDataAlphabet(); Alphabet newAlphabet = new Alphabet(); // It's necessary to create a new instance list in // order to make sure that the data alphabet is correct. Noop newPipe = new Noop (newAlphabet, instances.getTargetAlphabet()); InstanceList newInstanceList = new InstanceList (newPipe); // Iterate over the instances in the old list, adding // up occurrences of features. int numFeatures = oldAlphabet.size(); double[] counts = new double[numFeatures]; for (int ii = 0; ii < instances.size(); ii++) { Instance instance = instances.get(ii); FeatureSequence fs = (FeatureSequence) instance.getData(); fs.addFeatureWeightsTo(counts); } Instance instance, newInstance; // Next, iterate over the same list again, adding // each instance to the new list after pruning. while (instances.size() > 0) { instance = instances.get(0); FeatureSequence fs = (FeatureSequence) instance.getData(); fs.prune(counts, newAlphabet, pruneCount.value); newInstanceList.add(newPipe.instanceFrom(new Instance(fs, instance.getTarget(), instance.getName(), instance.getSource()))); instances.remove(0); } logger.info("features: " + oldAlphabet.size() + " -> " + newAlphabet.size()); // Make the new list the official list. instances = newInstanceList; } else if (firstInstance.getData() instanceof FeatureVector) { // Version for FeatureVector Alphabet alpha2 = new Alphabet (); Noop pipe2 = new Noop (alpha2, instances.getTargetAlphabet()); InstanceList instances2 = new InstanceList (pipe2); int numFeatures = instances.getDataAlphabet().size(); double[] counts = new double[numFeatures]; for (int ii = 0; ii < instances.size(); ii++) { Instance instance = instances.get(ii); FeatureVector fv = (FeatureVector) instance.getData(); fv.addTo(counts); } BitSet bs = new BitSet(numFeatures); for (int fi = 0; fi < numFeatures; fi++) { if (counts[fi] > pruneCount.value) { bs.set(fi); } } logger.info ("Pruning "+(numFeatures-bs.cardinality())+" features out of "+numFeatures +"; leaving "+(bs.cardinality())+" features."); FeatureSelection fs = new FeatureSelection (instances.getDataAlphabet(), bs); for (int ii = 0; ii < instances.size(); ii++) { Instance instance = instances.get(ii); FeatureVector fv = (FeatureVector) instance.getData(); FeatureVector fv2 = FeatureVector.newFeatureVector (fv, alpha2, fs); instances2.add(new Instance(fv2, instance.getTarget(), instance.getName(), instance.getSource()), instances.getInstanceWeight(ii)); instance.unLock(); instance.setData(null); // So it can be freed by the garbage collector } instances = instances2; } else { throw new UnsupportedOperationException("Pruning features from " + firstInstance.getClass().getName() + " is not currently supported"); } } if (pruneInfogain.value > 0) { Alphabet alpha2 = new Alphabet (); Noop pipe2 = new Noop (alpha2, instances.getTargetAlphabet()); InstanceList instances2 = new InstanceList (pipe2); InfoGain ig = new InfoGain (instances); FeatureSelection fs = new FeatureSelection (ig, pruneInfogain.value); for (int ii = 0; ii < instances.size(); ii++) { Instance instance = instances.get(ii); FeatureVector fv = (FeatureVector) instance.getData(); FeatureVector fv2 = FeatureVector.newFeatureVector (fv, alpha2, fs); instance.unLock(); instance.setData(null); // So it can be freed by the garbage collector instances2.add(pipe2.instanceFrom(new Instance(fv2, instance.getTarget(), instance.getName(), instance.getSource())), instances.getInstanceWeight(ii)); } instances = instances2; } if (vectorToSequence.value) { // Convert FeatureVector's to FeatureSequence's by simply randomizing the order // of all the word occurrences, including repetitions due to values larger than 1. Alphabet alpha = instances.getDataAlphabet(); Noop pipe2 = new Noop (alpha, instances.getTargetAlphabet()); InstanceList instances2 = new InstanceList (pipe2); for (int ii = 0; ii < instances.size(); ii++) { Instance instance = instances.get(ii); FeatureVector fv = (FeatureVector) instance.getData(); ArrayList seq = new ArrayList(); for (int loc = 0; loc < fv.numLocations(); loc++) for (int count = 0; count < fv.valueAtLocation(loc); count++) seq.add (new Integer(fv.indexAtLocation(loc))); Collections.shuffle(seq); int[] indices = new int[seq.size()]; for (int i = 0; i < indices.length; i++) indices[i] = ((Integer)seq.get(i)).intValue(); FeatureSequence fs = new FeatureSequence (alpha, indices); instance.unLock(); instance.setData(null); // So it can be freed by the garbage collector instances2.add(pipe2.instanceFrom(new Instance(fs, instance.getTarget(), instance.getName(), instance.getSource())), instances.getInstanceWeight(ii)); } instances = instances2; } if (outputFile.wasInvoked()) { writeInstanceList (instances, outputFile.value()); } else if (trainingFile.wasInvoked()) { writeInstanceList (instances, trainingFile.value()); } else { throw new IllegalArgumentException("You must specify a file to write to, using --output [filename]"); } } else if (vectorToSequence.value) { // Convert FeatureVector's to FeatureSequence's by simply randomizing the order // of all the word occurrences, including repetitions due to values larger than 1. Alphabet alpha = instances.getDataAlphabet(); Noop pipe2 = new Noop (alpha, instances.getTargetAlphabet()); InstanceList instances2 = new InstanceList (pipe2); for (int ii = 0; ii < instances.size(); ii++) { Instance instance = instances.get(ii); FeatureVector fv = (FeatureVector) instance.getData(); ArrayList seq = new ArrayList(); for (int loc = 0; loc < fv.numLocations(); loc++) for (int count = 0; count < fv.valueAtLocation(loc); count++) seq.add (new Integer(fv.indexAtLocation(loc))); Collections.shuffle(seq); int[] indices = new int[seq.size()]; for (int i = 0; i < indices.length; i++) indices[i] = ((Integer)seq.get(i)).intValue(); FeatureSequence fs = new FeatureSequence (alpha, indices); instance.unLock(); instance.setData(null); // So it can be freed by the garbage collector instances2.add(pipe2.instanceFrom(new Instance(fs, instance.getTarget(), instance.getName(), instance.getSource())), instances.getInstanceWeight(ii)); } instances = instances2; if (outputFile.wasInvoked()) { writeInstanceList (instances, outputFile.value()); } } else if (trainingProportion.wasInvoked() || validationProportion.wasInvoked()) { // Split into three lists... InstanceList[] instanceLists = instances.split (r, new double[] {t, 1-t-v, v}); // And write them out if (instanceLists[0].size() > 0) writeInstanceList(instanceLists[0], trainingFile.value()); if (instanceLists[1].size() > 0) writeInstanceList(instanceLists[1], testFile.value()); if (instanceLists[2].size() > 0) writeInstanceList(instanceLists[2], validationFile.value()); } else if (hideTargets.wasInvoked()) { Iterator<Instance> iter = instances.iterator(); while (iter.hasNext()) { Instance instance = iter.next(); instance.unLock(); instance.setProperty("target", instance.getTarget()); instance.setTarget(null); instance.lock(); } if (outputFile.wasInvoked()) { writeInstanceList (instances, outputFile.value()); } } else if (revealTargets.wasInvoked()) { Iterator<Instance> iter = instances.iterator(); while (iter.hasNext()) { Instance instance = iter.next(); instance.unLock(); instance.setTarget(instance.getProperty("target")); instance.lock(); } if (outputFile.wasInvoked()) { writeInstanceList (instances, outputFile.value()); } } } private static void writeInstanceList(InstanceList instances, File file) throws FileNotFoundException, IOException { logger.info ("Writing instance list to "+file); instances.save(file); } }
15,281
41.926966
140
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/Calo2Classify.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify.tui; import java.io.*; import java.util.*; import java.util.logging.*; import java.lang.reflect.*; import cc.mallet.classify.*; import cc.mallet.classify.evaluate.*; import cc.mallet.types.*; import cc.mallet.util.*; import java.util.Random; /** * Classify documents, run trials, print statistics from a vector file. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public abstract class Calo2Classify { private static Classifier classifierL; // CPAL - added private static Logger logger = MalletLogger.getLogger(Calo2Classify.class.getName()); private static Logger progressLogger = MalletProgressMessageLogger.getLogger(Calo2Classify.class.getName() + "-pl"); private static ArrayList classifierTrainers = new ArrayList(); private static boolean[][] ReportOptions = new boolean[3][4]; private static String[][] ReportOptionArgs = new String[3][4]; //arg in dataset:reportOption=arg // Essentially an enum mapping string names to enums to ints. private static class ReportOption { static final String[] dataOptions = {"train", "test", "validation"}; static final String[] reportOptions = {"accuracy", "f1", "confusion", "raw"}; static final int train=0; static final int test =1; static final int validation=2; static final int accuracy=0; static final int f1=1; static final int confusion=2; static final int raw=3; } static CommandOption.SpacedStrings report = new CommandOption.SpacedStrings (Calo2Classify.class, "report", "[train|test|validation]:[accuracy|f1|confusion|raw]", true, new String[] {"test:accuracy", "test:confusion", "train:accuracy"}, "", null) { public void postParsing (CommandOption.List list) { java.lang.String defaultRawFormatting = "siw"; for (int argi=0; argi<this.value.length; argi++){ // convert options like --report train:accuracy --report test:f1=labelA to // boolean array of options. // first, split the argument at semicolon. //System.out.println(argi + " " + this.value[argi]); java.lang.String arg = this.value[argi]; java.lang.String fields[] = arg.split("[:=]"); java.lang.String dataSet = fields[0]; java.lang.String reportOption = fields[1]; java.lang.String reportOptionArg = null; if (fields.length >=3){ reportOptionArg = fields[2]; } //System.out.println("Report option arg " + reportOptionArg); //find the datasource (test,train,validation) boolean foundDataSource = false; int i=0; for (; i<ReportOption.dataOptions.length; i++){ if (dataSet.equals(ReportOption.dataOptions[i])){ foundDataSource = true; break; } } if (!foundDataSource){ throw new IllegalArgumentException("Unknown argument = " + dataSet + " in --report " + this.value[argi]); } //find the report option (accuracy, f1, confusion, raw) boolean foundReportOption = false; int j=0; for (; j<ReportOption.reportOptions.length; j++){ if (reportOption.equals(ReportOption.reportOptions[j])){ foundReportOption = true; break; } } if (!foundReportOption){ throw new IllegalArgumentException("Unknown argument = " + reportOption + " in --report " + this.value[argi]); } //Mark the (dataSet,reportionOption) pair as selected ReportOptions[i][j] = true; if (j == ReportOption.f1){ // make sure a label was specified for f1 if (reportOptionArg == null){ throw new IllegalArgumentException("F1 must have label argument in --report " + this.value[argi]); } // Pass through the string argument ReportOptionArgs[i][j]= reportOptionArg; }else if (reportOptionArg != null){ throw new IllegalArgumentException("No arguments after = allowed in --report " + this.value[argi]); } } } }; static CommandOption.Object trainerConstructor = new CommandOption.Object (Calo2Classify.class, "trainer", "ClassifierTrainer constructor", true, new NaiveBayesTrainer(), "Java code for the constructor used to create a ClassifierTrainer. "+ "If no '(' appears, then \"new \" will be prepended and \"Trainer()\" will be appended."+ "You may use this option mutiple times to compare multiple classifiers.", null) { public void parseArg (java.lang.String arg) { // parse something like Maxent,gaussianPriorVariance=10,numIterations=20 //System.out.println("Arg = " + arg); // first, split the argument at commas. java.lang.String fields[] = arg.split(","); //Massage constructor name, so that MaxEnt, MaxEntTrainer, new MaxEntTrainer() // all call new MaxEntTrainer() java.lang.String constructorName = fields[0]; if (constructorName.indexOf('(') != -1) // if contains (), pass it though super.parseArg(arg); else { if (constructorName.endsWith("Trainer")){ super.parseArg("new " + constructorName + "()"); // add parens if they forgot }else{ super.parseArg("new "+constructorName+"Trainer()"); // make trainer name from classifier name } } // find methods associated with the class we just built Method methods[] = this.value.getClass().getMethods(); // find setters corresponding to parameter names. for (int i=1; i<fields.length; i++){ java.lang.String nameValuePair[] = fields[i].split("="); java.lang.String parameterName = nameValuePair[0]; java.lang.String parameterValue = nameValuePair[1]; //todo: check for val present! java.lang.Object parameterValueObject; try { parameterValueObject = getInterpreter().eval(parameterValue); } catch (bsh.EvalError e) { throw new IllegalArgumentException ("Java interpreter eval error on parameter "+ parameterName + "\n"+e); } boolean foundSetter = false; for (int j=0; j<methods.length; j++){ // System.out.println("method " + j + " name is " + methods[j].getName()); // System.out.println("set" + Character.toUpperCase(parameterName.charAt(0)) + parameterName.substring(1)); if ( ("set" + Character.toUpperCase(parameterName.charAt(0)) + parameterName.substring(1)).equals(methods[j].getName()) && methods[j].getParameterTypes().length == 1){ // System.out.println("Matched method " + methods[j].getName()); // Class[] ptypes = methods[j].getParameterTypes(); // System.out.println("Parameter types:"); // for (int k=0; k<ptypes.length; k++){ // System.out.println("class " + k + " = " + ptypes[k].getName()); // } try { java.lang.Object[] parameterList = new java.lang.Object[]{parameterValueObject}; // System.out.println("Argument types:"); // for (int k=0; k<parameterList.length; k++){ // System.out.println("class " + k + " = " + parameterList[k].getClass().getName()); // } methods[j].invoke(this.value, parameterList); } catch ( IllegalAccessException e) { System.out.println("IllegalAccessException " + e); throw new IllegalArgumentException ("Java access error calling setter\n"+e); } catch ( InvocationTargetException e) { System.out.println("IllegalTargetException " + e); throw new IllegalArgumentException ("Java target error calling setter\n"+e); } foundSetter = true; break; } } if (!foundSetter){ System.out.println("Parameter " + parameterName + " not found on trainer " + constructorName); System.out.println("Available parameters for " + constructorName); for (int j=0; j<methods.length; j++){ if ( methods[j].getName().startsWith("set") && methods[j].getParameterTypes().length == 1){ System.out.println(Character.toLowerCase(methods[j].getName().charAt(3)) + methods[j].getName().substring(4)); } } throw new IllegalArgumentException ("no setter found for parameter " + parameterName); } } } public void postParsing (CommandOption.List list) { assert (this.value instanceof ClassifierTrainer); //System.out.println("v2c PostParsing " + this.value); classifierTrainers.add (this.value); } }; // CPAL - added this to load a classifier from a file static CommandOption.String loadmodelFile = new CommandOption.String (Calo2Classify.class, "load-model", "FILENAME", true, "classifier.mallet", "The filename in which to write the classifier after it has been trained.", null); static CommandOption.String outputFile = new CommandOption.String (Calo2Classify.class, "output-classifier", "FILENAME", true, "classifier.mallet", "The filename in which to write the classifier after it has been trained.", null); static CommandOption.String inputFile = new CommandOption.String (Calo2Classify.class, "input", "FILENAME", true, "text.vectors", "The filename from which to read the list of training instances. Use - for stdin.", null); static CommandOption.String trainingFile = new CommandOption.String (Calo2Classify.class, "training-file", "FILENAME", true, "text.vectors", "Read the training set instance list from this file. " + "If this is specified, the input file parameter is ignored", null); static CommandOption.String testFile = new CommandOption.String (Calo2Classify.class, "testing-file", "FILENAME", true, "text.vectors", "Read the test set instance list to this file. " + "If this option is specified, the training-file parameter must be specified and " + " the input-file parameter is ignored", null); static CommandOption.String validationFile = new CommandOption.String (Calo2Classify.class, "validation-file", "FILENAME", true, "text.vectors", "Read the validation set instance list to this file." + "If this option is specified, the training-file parameter must be specified and " + "the input-file parameter is ignored", null); static CommandOption.Double trainingProportionOption = new CommandOption.Double (Calo2Classify.class, "training-portion", "DECIMAL", true, 1.0, "The fraction of the instances that should be used for training.", null); static CommandOption.Double validationProportionOption = new CommandOption.Double (Calo2Classify.class, "validation-portion", "DECIMAL", true, 0.0, "The fraction of the instances that should be used for validation.", null); static CommandOption.Double unlabeledProportionOption = new CommandOption.Double (Calo2Classify.class, "unlabeled-portion", "DECIMAL", true, 0.0, "The fraction of the training instances that should have their labels hidden. " +"Note that these are taken out of the training-portion, not allocated separately.", null); static CommandOption.Integer randomSeedOption = new CommandOption.Integer (Calo2Classify.class, "random-seed", "INTEGER", true, 0, "The random seed for randomly selecting a proportion of the instance list for training", null); static CommandOption.Integer numTrialsOption = new CommandOption.Integer (Calo2Classify.class, "num-trials", "INTEGER", true, 1, "The number of random train/test splits to perform", null); static CommandOption.Object classifierEvaluatorOption = new CommandOption.Object (Calo2Classify.class, "classifier-evaluator", "CONSTRUCTOR", true, null, "Java code for constructing a ClassifierEvaluating object", null); // static CommandOption.Boolean printTrainAccuracyOption = new CommandOption.Boolean // (Vectors2Classify.class, "print-train-accuracy", "true|false", true, true, // "After training, run the resulting classifier on the instances included in training, " // +"and print the accuracy", null); // // static CommandOption.Boolean printTestAccuracyOption = new CommandOption.Boolean // (Vectors2Classify.class, "print-test-accuracy", "true|false", true, true, // "After training, run the resulting classifier on the instances not included in training, " // +"and print the accuracy", null); static CommandOption.Integer verbosityOption = new CommandOption.Integer (Calo2Classify.class, "verbosity", "INTEGER", true, -1, "The level of messages to print: 0 is silent, 8 is most verbose. " + "Levels 0-8 correspond to the java.logger predefined levels "+ "off, severe, warning, info, config, fine, finer, finest, all. " + "The default value is taken from the mallet logging.properties file," + " which currently defaults to INFO level (3)", null); static CommandOption.Boolean noOverwriteProgressMessagesOption = new CommandOption.Boolean (Calo2Classify.class, "noOverwriteProgressMessages", "true|false", false, false, "Suppress writing-in-place on terminal for progess messages - repetitive messages " +"of which only the latest is generally of interest", null); public static void main (String[] args) throws bsh.EvalError, IOException { // Process the command-line options CommandOption.setSummary (Calo2Classify.class, "A tool for training, saving and printing diagnostics from a classifier on vectors."); CommandOption.process (Calo2Classify.class, args); // handle default trainer here for now; default argument processing doesn't work if (!trainerConstructor.wasInvoked()){ classifierTrainers.add (new NaiveBayesTrainer()); } if (!report.wasInvoked()){ report.postParsing(null); // force postprocessing of default value } int verbosity = verbosityOption.value; Logger rootLogger = ((MalletLogger)progressLogger).getRootLogger(); if (verbosityOption.wasInvoked()){ rootLogger.setLevel( MalletLogger.LoggingLevels[verbosity]); } if (noOverwriteProgressMessagesOption.value == false){ // install special formatting for progress messages // find console handler on root logger; change formatter to one // that knows about progress messages Handler[] handlers = rootLogger.getHandlers(); for (int i = 0; i < handlers.length; i++) { if (handlers[i] instanceof ConsoleHandler) { handlers[i].setFormatter(new ProgressMessageLogFormatter()); } } } boolean separateIlists = testFile.wasInvoked() || trainingFile.wasInvoked() || validationFile.wasInvoked(); InstanceList ilist=null; InstanceList testFileIlist=null; InstanceList trainingFileIlist=null; InstanceList validationFileIlist=null; if (!separateIlists) { // normal case, --input-file specified // Read in the InstanceList, from stdin if the input filename is "-". ilist = InstanceList.load (new File(inputFile.value)); }else{ // user specified separate files for testing and training sets. trainingFileIlist = InstanceList.load (new File(trainingFile.value)); logger.info("Training vectors loaded from " + trainingFile.value); if (testFile.wasInvoked()){ testFileIlist = InstanceList.load (new File(testFile.value)); logger.info("Testing vectors loaded from " + testFile.value); } if (validationFile.wasInvoked()){ validationFileIlist = InstanceList.load (new File(validationFile.value)); logger.info("validation vectors loaded from " + validationFile.value); } } int numTrials = numTrialsOption.value; Random r = randomSeedOption.wasInvoked() ? new Random (randomSeedOption.value) : new Random (); ClassifierTrainer[] trainers = new ClassifierTrainer[classifierTrainers.size()]; for (int i = 0; i < classifierTrainers.size(); i++) { trainers[i] = (ClassifierTrainer) classifierTrainers.get(i); logger.fine ("Trainer specified = "+trainers[i].toString()); } double trainAccuracy[][] = new double[trainers.length][numTrials]; double testAccuracy[][] = new double[trainers.length][numTrials]; double validationAccuracy[][] = new double[trainers.length][numTrials]; String trainConfusionMatrix[][] = new String[trainers.length][numTrials]; String testConfusionMatrix[][] = new String[trainers.length][numTrials]; String validationConfusionMatrix[][] = new String[trainers.length][numTrials]; double t = trainingProportionOption.value; double v = validationProportionOption.value; if (!separateIlists) { logger.info("Training portion = " + t); logger.info(" Unlabeled training sub-portion = "+unlabeledProportionOption.value); logger.info("Validation portion = " + v); logger.info("Testing portion = " + (1 - v - t)); } // for (int i=0; i<3; i++){ // for (int j=0; j<4; j++){ // System.out.print(" " + ReportOptions[i][j]); // } // System.out.println(); // } // CPAL - Initialize A Classifier to be used for each trial // CPAL - use this to load a classifier if (loadmodelFile.wasInvoked()) { String filename = loadmodelFile.value; //String filename = outputFile.value; //if (trainers.length > 1) filename = filename+trainers[c].toString(); //if (numTrials > 1) filename = filename+".trial"+trialIndex; try { //ObjectOutputStream oos = new ObjectOutputStream // (new FileOutputStream (filename)); //oos.writeObject (classifier); ObjectInputStream iis = new ObjectInputStream (new FileInputStream (filename)); classifierL = (Classifier) iis.readObject(); iis.close(); } catch (Exception e) { e.printStackTrace(); throw new IllegalArgumentException ("Couldn't read classifier from filename "+ filename); } } // CPAL for (int trialIndex = 0; trialIndex < numTrials; trialIndex++) { System.out.println("\n-------------------- Trial " + trialIndex + " --------------------\n"); InstanceList[] ilists; BitSet unlabeledIndices = null; if (!separateIlists){ ilists = ilist.split (r, new double[] {t, 1-t-v, v}); } else { ilists = new InstanceList[3]; ilists[0] = trainingFileIlist; ilists[1] = testFileIlist; ilists[2] = testFileIlist; } if (unlabeledProportionOption.value > 0) unlabeledIndices = new cc.mallet.util.Randoms(r.nextInt()) .nextBitSet(ilists[0].size(), unlabeledProportionOption.value); //InfoGain ig = new InfoGain (ilists[0]); //int igl = Math.min (10, ig.numLocations()); //for (int i = 0; i < igl; i++) //System.out.println ("InfoGain["+ig.getObjectAtRank(i)+"]="+ig.getValueAtRank(i)); //ig.print(); //FeatureSelection selectedFeatures = new FeatureSelection (ig, 8000); //ilists[0].setFeatureSelection (selectedFeatures); //OddsRatioFeatureInducer orfi = new OddsRatioFeatureInducer (ilists[0]); //orfi.induceFeatures (ilists[0], false, true); //System.out.println ("Training with "+ilists[0].size()+" instances"); long time[] = new long[trainers.length]; for (int c = 0; c < trainers.length; c++){ time[c] = System.currentTimeMillis(); System.out.println ("Trial " + trialIndex + " Training " + trainers[c].toString() + " with "+ilists[0].size()+" instances"); if (unlabeledProportionOption.value > 0) ilists[0].hideSomeLabels(unlabeledIndices); Classifier classifier; if(loadmodelFile.wasInvoked()) { classifier = classifierL; } else { classifier = trainers[c].train (ilists[0]); } if (unlabeledProportionOption.value > 0) ilists[0].unhideAllLabels(); System.out.println ("Trial " + trialIndex + " Training " + trainers[c].toString() + " finished"); time[c] = System.currentTimeMillis() - time[c]; Trial trainTrial = new Trial (classifier, ilists[0]); assert (ilists[1].size() > 0); Trial testTrial = new Trial (classifier, ilists[1]); Trial validationTrial = new Trial(classifier, ilists[2]); if (ilists[0].size()>0) trainConfusionMatrix[c][trialIndex] = new ConfusionMatrix (trainTrial).toString(); if (ilists[1].size()>0) testConfusionMatrix[c][trialIndex] = new ConfusionMatrix (testTrial).toString(); if (ilists[2].size()>0) validationConfusionMatrix[c][trialIndex] = new ConfusionMatrix (validationTrial).toString(); trainAccuracy[c][trialIndex] = trainTrial.getAccuracy(); testAccuracy[c][trialIndex] = testTrial.getAccuracy(); validationAccuracy[c][trialIndex] = validationTrial.getAccuracy(); if (outputFile.wasInvoked()) { String filename = outputFile.value; if (trainers.length > 1) filename = filename+trainers[c].toString(); if (numTrials > 1) filename = filename+".trial"+trialIndex; try { ObjectOutputStream oos = new ObjectOutputStream (new FileOutputStream (filename)); oos.writeObject (classifier); oos.close(); } catch (Exception e) { e.printStackTrace(); throw new IllegalArgumentException ("Couldn't write classifier to filename "+ filename); } } // New Reporting // raw output if (ReportOptions[ReportOption.train][ReportOption.raw]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString()); System.out.println(" Raw Training Data"); printTrialClassification(trainTrial); } if (ReportOptions[ReportOption.test][ReportOption.raw]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString()); System.out.println(" Raw Testing Data"); printTrialClassification(testTrial); } if (ReportOptions[ReportOption.validation][ReportOption.raw]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString()); System.out.println(" Raw Validation Data"); printTrialClassification(validationTrial); } //train if (ReportOptions[ReportOption.train][ReportOption.confusion]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " Training Data Confusion Matrix"); if (ilists[0].size()>0) System.out.println (trainConfusionMatrix[c][trialIndex]); } if (ReportOptions[ReportOption.train][ReportOption.accuracy]){ System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " training data accuracy= "+ trainAccuracy[c][trialIndex]); } if (ReportOptions[ReportOption.train][ReportOption.f1]){ String label = ReportOptionArgs[ReportOption.train][ReportOption.f1]; System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " training data F1(" + label + ") = "+ trainTrial.getF1(label)); } //validation if (ReportOptions[ReportOption.validation][ReportOption.confusion]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " Validation Data Confusion Matrix"); if (ilists[2].size()>0) System.out.println (validationConfusionMatrix[c][trialIndex]); } if (ReportOptions[ReportOption.validation][ReportOption.accuracy]){ System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " validation data accuracy= "+ validationAccuracy[c][trialIndex]); } if (ReportOptions[ReportOption.validation][ReportOption.f1]){ String label = ReportOptionArgs[ReportOption.validation][ReportOption.f1]; System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " validation data F1(" + label + ") = "+ validationTrial.getF1(label)); } //test if (ReportOptions[ReportOption.test][ReportOption.confusion]){ System.out.println("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " Test Data Confusion Matrix"); if (ilists[1].size()>0) System.out.println (testConfusionMatrix[c][trialIndex]); } if (ReportOptions[ReportOption.test][ReportOption.accuracy]){ System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " test data accuracy= "+ testAccuracy[c][trialIndex]); } if (ReportOptions[ReportOption.test][ReportOption.f1]){ String label = ReportOptionArgs[ReportOption.test][ReportOption.f1]; System.out.println ("Trial " + trialIndex + " Trainer " + trainers[c].toString() + " test data F1(" + label + ") = "+ testTrial.getF1(label)); } } // end for each trainer } // end for each trial // New reporting //"[train|test|validation]:[accuracy|f1|confusion|raw]" for (int c=0; c < trainers.length; c++) { System.out.println ("\n"+trainers[c].toString()); if (ReportOptions[ReportOption.train][ReportOption.accuracy]) System.out.println ("Summary. train accuracy mean = "+ MatrixOps.mean (trainAccuracy[c])+ " stddev = "+ MatrixOps.stddev (trainAccuracy[c])+ " stderr = "+ MatrixOps.stderr (trainAccuracy[c])); if (ReportOptions[ReportOption.validation][ReportOption.accuracy]) System.out.println ("Summary. validation accuracy mean = "+ MatrixOps.mean (validationAccuracy[c])+ " stddev = "+ MatrixOps.stddev (validationAccuracy[c])+ " stderr = "+ MatrixOps.stderr (validationAccuracy[c])); if (ReportOptions[ReportOption.test][ReportOption.accuracy]) System.out.println ("Summary. test accuracy mean = "+ MatrixOps.mean (testAccuracy[c])+ " stddev = "+ MatrixOps.stddev (testAccuracy[c])+ " stderr = "+ MatrixOps.stderr (testAccuracy[c])); } // end for each trainer } private static void printTrialClassification(Trial trial) { for (int i = 0; i < trial.size(); i++) { Instance instance = trial.get(i).getInstance(); System.out.print(instance.getName() + " " + instance.getTarget() + " "); Labeling labeling = trial.get(i).getLabeling(); for (int j = 0; j < labeling.numLocations(); j++){ System.out.print(labeling.getLabelAtRank(j).toString() + ":" + labeling.getValueAtRank(j) + " "); } System.out.println(); } } }
26,634
42.807566
159
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/SvmLight2Vectors.java
/* Copyright (C) 2010 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify.tui; import java.util.ArrayList; import java.util.logging.*; import java.io.*; import java.nio.charset.Charset; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.*; import cc.mallet.types.*; import cc.mallet.util.*; /** * Command line import tool for loading a sequence of * instances from an SVMLight feature-value pair file, with one instance * per line of the input file. * <p> * * The expected format is * * target feature:value feature:value ... * * targets and features can be indices, as in * SVMLight, or Strings. * * Note that if targets and features are indices, * their indices in the data and target Alphabets * may be different, though the data will be * equivalent. * * Note that the input and output args can take multiple files. * * @author Gregory Druck */ public class SvmLight2Vectors { private static Logger logger = MalletLogger.getLogger(SvmLight2Vectors.class.getName()); static CommandOption.SpacedStrings inputFiles = new CommandOption.SpacedStrings (SvmLight2Vectors.class, "input", "FILE", true, null, "The files containing data to be classified, one instance per line", null); static CommandOption.SpacedStrings outputFiles = new CommandOption.SpacedStrings (SvmLight2Vectors.class, "output", "FILE", true, null, "Write the instance list to this file; Using - indicates stdout.", null); static CommandOption.File usePipeFromVectorsFile = new CommandOption.File (SvmLight2Vectors.class, "use-pipe-from", "FILE", true, new File("text.vectors"), "Use the pipe and alphabets from a previously created vectors file.\n" + " Allows the creation, for example, of a test set of vectors that are\n" + " compatible with a previously created set of training vectors", null); static CommandOption.Boolean printOutput = new CommandOption.Boolean (SvmLight2Vectors.class, "print-output", "[TRUE|FALSE]", false, false, "If true, print a representation of the processed data\n" + " to standard output. This option is intended for debugging.", null); static CommandOption.String encoding = new CommandOption.String (SvmLight2Vectors.class, "encoding", "STRING", true, Charset.defaultCharset().displayName(), "Character encoding for input file", null); public static void main (String[] args) throws FileNotFoundException, IOException { // Process the command-line options CommandOption.setSummary (SvmLight2Vectors.class, "A tool for creating instance lists of feature vectors from comma-separated-values"); CommandOption.process (SvmLight2Vectors.class, args); // Print some helpful messages for error cases if (args.length == 0) { CommandOption.getList(SvmLight2Vectors.class).printUsage(false); System.exit (-1); } if (inputFiles == null) { throw new IllegalArgumentException ("You must include `--input FILE FILE ...' in order to specify "+ "files containing the instances, one per line."); } Pipe instancePipe; InstanceList previousInstanceList = null; if (usePipeFromVectorsFile.wasInvoked()) { // Ignore all options, use a previously created pipe previousInstanceList = InstanceList.load (usePipeFromVectorsFile.value); instancePipe = previousInstanceList.getPipe(); } else { // Build a new pipe ArrayList<Pipe> pipeList = new ArrayList<Pipe>(); pipeList.add(new SvmLight2FeatureVectorAndLabel()); if (printOutput.value) { pipeList.add(new PrintInputAndTarget()); } instancePipe = new SerialPipes(pipeList); } if (inputFiles.value.length != outputFiles.value.length) { throw new RuntimeException("Number of input and output files must be the same."); } for (int fileIndex = 0; fileIndex < inputFiles.value.length; fileIndex++) { // Create the instance list and open the input file InstanceList instances = new InstanceList (instancePipe); Reader fileReader; if (inputFiles.value[fileIndex].equals ("-")) { fileReader = new InputStreamReader (System.in); } else { fileReader = new InputStreamReader(new FileInputStream(inputFiles.value[fileIndex]), encoding.value); } // Read instances from the file instances.addThruPipe (new SelectiveFileLineIterator (fileReader, "^\\s*#.+")); // Save instances to output file ObjectOutputStream oos; if (outputFiles.value[fileIndex].toString().equals ("-")) { oos = new ObjectOutputStream(System.out); } else { oos = new ObjectOutputStream(new FileOutputStream(outputFiles.value[fileIndex])); } oos.writeObject(instances); oos.close(); } // If we are reusing a pipe from an instance list // created earlier, we may have extended the label // or feature alphabets. To maintain compatibility, // we now save that original instance list back to disk // with the new alphabet. if (usePipeFromVectorsFile.wasInvoked()) { System.out.println(" Rewriting extended pipe from " + usePipeFromVectorsFile.value); System.out.println(" Instance ID = " + previousInstanceList.getPipe().getInstanceId()); ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(usePipeFromVectorsFile.value)); oos.writeObject(previousInstanceList); oos.close(); } } }
5,678
35.876623
105
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/Text2Classify.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify.tui; import java.util.Iterator; import java.util.logging.*; import java.io.*; import java.nio.charset.Charset; import cc.mallet.classify.*; import cc.mallet.pipe.iterator.*; import cc.mallet.types.*; import cc.mallet.util.*; /** * Command line tool for classifying a sequence of * instances directly from text input, without * creating an instance list. * <p> * * @author Gregory Druck * @author David Mimno */ public class Text2Classify { private static Logger logger = MalletLogger.getLogger(Text2Classify.class.getName()); static CommandOption.SpacedStrings classDirs = new CommandOption.SpacedStrings (Text2Classify.class, "input", "DIR...", true, null, "The directories containing text files to be classified, one directory per class", null); static CommandOption.File outputFile = new CommandOption.File (Text2Classify.class, "output", "FILE", true, new File("text.vectors"), "Write the instance list to this file; Using - indicates stdout.", null); static CommandOption.String lineRegex = new CommandOption.String (Text2Classify.class, "line-regex", "REGEX", true, "^(\\S*)[\\s,]*(.*)$", "Regular expression containing regex-groups for label, name and data.", null); static CommandOption.Integer nameOption = new CommandOption.Integer (Text2Classify.class, "name", "INTEGER", true, 1, "The index of the group containing the instance name.\n" + " Use 0 to indicate that the name field is not used.", null); static CommandOption.Integer dataOption = new CommandOption.Integer (Text2Classify.class, "data", "INTEGER", true, 2, "The index of the group containing the data.", null); static CommandOption.File classifierFile = new CommandOption.File (Text2Classify.class, "classifier", "FILE", true, new File("classifier"), "Use the pipe and alphabets from a previously created vectors file.\n" + " Allows the creation, for example, of a test set of vectors that are\n" + " compatible with a previously created set of training vectors", null); static CommandOption.String encoding = new CommandOption.String (Text2Classify.class, "encoding", "STRING", true, Charset.defaultCharset().displayName(), "Character encoding for input file", null); public static void main (String[] args) throws FileNotFoundException, IOException { // Process the command-line options CommandOption.setSummary (Text2Classify.class, "A tool for classifying a stream of unlabeled instances"); CommandOption.process (Text2Classify.class, args); // Print some helpful messages for error cases if (args.length == 0) { CommandOption.getList(Text2Classify.class).printUsage(false); System.exit (-1); } if (classDirs.value.length == 0) { throw new IllegalArgumentException ("You must include --input DIR1 DIR2 ...' in order to specify a " + "list of directories containing the documents."); } // Read classifier from file Classifier classifier = null; try { ObjectInputStream ois = new ObjectInputStream (new BufferedInputStream(new FileInputStream (classifierFile.value))); classifier = (Classifier) ois.readObject(); ois.close(); } catch (Exception e) { throw new IllegalArgumentException("Problem loading classifier from file " + classifierFile.value + ": " + e.getMessage()); } // Read instances from directories File[] directories = new File[classDirs.value.length]; for (int i = 0; i < classDirs.value.length; i++) { directories[i] = new File (classDirs.value[i]); } Iterator<Instance> fileIterator = new UnlabeledFileIterator (directories); Iterator<Instance> iterator = classifier.getInstancePipe().newIteratorFrom(fileIterator); // Write classifications to the output file PrintStream out = null; if (outputFile.value.toString().equals ("-")) { out = System.out; } else { out = new PrintStream(outputFile.value, encoding.value); } // [email protected] // Stop growth on the alphabets. If this is not done and new // features are added, the feature and classifier parameter // indices will not match. classifier.getInstancePipe().getDataAlphabet().stopGrowth(); classifier.getInstancePipe().getTargetAlphabet().stopGrowth(); while (iterator.hasNext()) { Instance instance = iterator.next(); Labeling labeling = classifier.classify(instance).getLabeling(); StringBuilder output = new StringBuilder(); output.append(instance.getName()); for (int location = 0; location < labeling.numLocations(); location++) { output.append("\t" + labeling.labelAtLocation(location)); output.append("\t" + labeling.valueAtLocation(location)); } out.println(output); } if (! outputFile.value.toString().equals ("-")) { out.close(); } } }
5,242
35.922535
105
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/Csv2Vectors.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify.tui; import java.util.ArrayList; import java.util.logging.*; import java.util.regex.*; import java.io.*; import java.nio.charset.Charset; import cc.mallet.classify.*; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.*; import cc.mallet.types.*; import cc.mallet.util.*; /** * Command line import tool for loading a sequence of * instances from a single file, with one instance * per line of the input file. * <p> * Despite the name of the class, input data does not * have to be comma-separated, and instance data can * remain sequences (rather than unordered vectors). * * @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class Csv2Vectors { private static Logger logger = MalletLogger.getLogger(Csv2Vectors.class.getName()); static CommandOption.File inputFile = new CommandOption.File (Csv2Vectors.class, "input", "FILE", true, null, "The file containing data to be classified, one instance per line", null); static CommandOption.File outputFile = new CommandOption.File (Csv2Vectors.class, "output", "FILE", true, new File("text.vectors"), "Write the instance list to this file; Using - indicates stdout.", null); static CommandOption.String lineRegex = new CommandOption.String (Csv2Vectors.class, "line-regex", "REGEX", true, "^(\\S*)[\\s,]*(\\S*)[\\s,]*(.*)$", "Regular expression containing regex-groups for label, name and data.", null); static CommandOption.Integer labelOption = new CommandOption.Integer (Csv2Vectors.class, "label", "INTEGER", true, 2, "The index of the group containing the label string.\n" + " Use 0 to indicate that the label field is not used.", null); static CommandOption.Integer nameOption = new CommandOption.Integer (Csv2Vectors.class, "name", "INTEGER", true, 1, "The index of the group containing the instance name.\n" + " Use 0 to indicate that the name field is not used.", null); static CommandOption.Integer dataOption = new CommandOption.Integer (Csv2Vectors.class, "data", "INTEGER", true, 3, "The index of the group containing the data.", null); static CommandOption.File usePipeFromVectorsFile = new CommandOption.File (Csv2Vectors.class, "use-pipe-from", "FILE", true, new File("text.vectors"), "Use the pipe and alphabets from a previously created vectors file.\n" + " Allows the creation, for example, of a test set of vectors that are\n" + " compatible with a previously created set of training vectors", null); static CommandOption.Boolean keepSequence = new CommandOption.Boolean (Csv2Vectors.class, "keep-sequence", "[TRUE|FALSE]", false, false, "If true, final data will be a FeatureSequence rather than a FeatureVector.", null); static CommandOption.Boolean keepSequenceBigrams = new CommandOption.Boolean (Csv2Vectors.class, "keep-sequence-bigrams", "[TRUE|FALSE]", false, false, "If true, final data will be a FeatureSequenceWithBigrams rather than a FeatureVector.", null); static CommandOption.Boolean removeStopWords = new CommandOption.Boolean (Csv2Vectors.class, "remove-stopwords", "[TRUE|FALSE]", false, false, "If true, remove a default list of common English \"stop words\" from the text.", null); static CommandOption.File stoplistFile = new CommandOption.File (Csv2Vectors.class, "stoplist-file", "FILE", true, null, "Read \"stop words\" from a file, one per line. Implies --remove-stopwords", null); static CommandOption.File extraStopwordsFile = new CommandOption.File (Csv2Vectors.class, "extra-stopwords", "FILE", true, null, "Read whitespace-separated words from this file, and add them to either " + " the default English stoplist or the list specified by --stoplist-file.", null); static CommandOption.Boolean preserveCase = new CommandOption.Boolean (Csv2Vectors.class, "preserve-case", "[TRUE|FALSE]", false, false, "If true, do not force all strings to lowercase.", null); static CommandOption.String encoding = new CommandOption.String (Csv2Vectors.class, "encoding", "STRING", true, Charset.defaultCharset().displayName(), "Character encoding for input file", null); static CommandOption.String tokenRegex = new CommandOption.String (Csv2Vectors.class, "token-regex", "REGEX", true, CharSequenceLexer.LEX_ALPHA.toString(), "Regular expression used for tokenization.\n" + " Example: \"[\\p{L}\\p{N}_]+|[\\p{P}]+\" (unicode letters, numbers and underscore OR all punctuation) ", null); static CommandOption.Boolean printOutput = new CommandOption.Boolean (Csv2Vectors.class, "print-output", "[TRUE|FALSE]", false, false, "If true, print a representation of the processed data\n" + " to standard output. This option is intended for debugging.", null); public static void main (String[] args) throws FileNotFoundException, IOException { // Process the command-line options CommandOption.setSummary (Csv2Vectors.class, "A tool for creating instance lists of feature vectors from comma-separated-values"); CommandOption.process (Csv2Vectors.class, args); // Print some helpful messages for error cases if (args.length == 0) { CommandOption.getList(Csv2Vectors.class).printUsage(false); System.exit (-1); } if (inputFile == null) { throw new IllegalArgumentException ("You must include `--input FILE ...' in order to specify a"+ "file containing the instances, one per line."); } Pipe instancePipe; InstanceList previousInstanceList = null; if (usePipeFromVectorsFile.wasInvoked()) { // Ignore all options, use a previously created pipe previousInstanceList = InstanceList.load (usePipeFromVectorsFile.value); instancePipe = previousInstanceList.getPipe(); } else { // Build a new pipe ArrayList<Pipe> pipeList = new ArrayList<Pipe>(); // Convert the "target" object into a numeric index // into a LabelAlphabet. if (labelOption.value > 0) { // If the label field is not used, adding this // pipe will cause "Alphabets don't match" exceptions. pipeList.add(new Target2Label()); } // // Tokenize the input: first compile the tokenization pattern // Pattern tokenPattern = null; if (keepSequenceBigrams.value) { // We do not want to record bigrams across punctuation, // so we need to keep non-word tokens. tokenPattern = CharSequenceLexer.LEX_NONWHITESPACE_CLASSES; } else { // Otherwise, try to compile the regular expression pattern. try { tokenPattern = Pattern.compile(tokenRegex.value); } catch (PatternSyntaxException pse) { throw new IllegalArgumentException("The token regular expression (" + tokenRegex.value + ") was invalid: " + pse.getMessage()); } } // Add the tokenizer pipeList.add(new CharSequence2TokenSequence(tokenPattern)); // // Normalize the input as necessary // if (! preserveCase.value()) { pipeList.add(new TokenSequenceLowercase()); } if (keepSequenceBigrams.value) { // Remove non-word tokens, but record the fact that they // were there. pipeList.add(new TokenSequenceRemoveNonAlpha(true)); } // Stopword removal. if (stoplistFile.wasInvoked()) { // The user specified a new list TokenSequenceRemoveStopwords stopwordFilter = new TokenSequenceRemoveStopwords(stoplistFile.value, encoding.value, false, // don't include default list false, keepSequenceBigrams.value); if (extraStopwordsFile.wasInvoked()) { stopwordFilter.addStopWords(extraStopwordsFile.value); } pipeList.add(stopwordFilter); } else if (removeStopWords.value) { // The user did not specify a new list, so use the default // built-in English list, possibly adding extra words. TokenSequenceRemoveStopwords stopwordFilter = new TokenSequenceRemoveStopwords(false, keepSequenceBigrams.value); if (extraStopwordsFile.wasInvoked()) { stopwordFilter.addStopWords(extraStopwordsFile.value); } pipeList.add(stopwordFilter); } // // Convert tokens to numeric indices into the Alphabet // if (keepSequenceBigrams.value) { // Output is feature sequences with bigram features pipeList.add(new TokenSequence2FeatureSequenceWithBigrams()); } else if (keepSequence.value) { // Output is unigram feature sequences pipeList.add(new TokenSequence2FeatureSequence()); } else { // Output is feature vectors (no sequence information) pipeList.add(new TokenSequence2FeatureSequence()); pipeList.add(new FeatureSequence2AugmentableFeatureVector()); } if (printOutput.value) { pipeList.add(new PrintInputAndTarget()); } instancePipe = new SerialPipes(pipeList); } // // Create the instance list and open the input file // InstanceList instances = new InstanceList (instancePipe); Reader fileReader; if (inputFile.value.toString().equals ("-")) { fileReader = new InputStreamReader (System.in); } else { fileReader = new InputStreamReader(new FileInputStream(inputFile.value), encoding.value); } // // Read instances from the file // instances.addThruPipe (new CsvIterator (fileReader, Pattern.compile(lineRegex.value), dataOption.value, labelOption.value, nameOption.value)); // // Save instances to output file // ObjectOutputStream oos; if (outputFile.value.toString().equals ("-")) { oos = new ObjectOutputStream(System.out); } else { oos = new ObjectOutputStream(new FileOutputStream(outputFile.value)); } oos.writeObject(instances); oos.close(); // If we are reusing a pipe from an instance list // created earlier, we may have extended the label // or feature alphabets. To maintain compatibility, // we now save that original instance list back to disk // with the new alphabet. if (usePipeFromVectorsFile.wasInvoked()) { System.out.println(" Rewriting extended pipe from " + usePipeFromVectorsFile.value); System.out.println(" Instance ID = " + previousInstanceList.getPipe().getInstanceId()); oos = new ObjectOutputStream(new FileOutputStream(usePipeFromVectorsFile.value)); oos.writeObject(previousInstanceList); oos.close(); } } }
10,861
34.496732
117
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/Vectors2FeatureConstraints.java
package cc.mallet.classify.tui; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.FileReader; import java.io.FileWriter; import java.io.ObjectInputStream; import java.util.ArrayList; import java.util.HashMap; import java.util.logging.Logger; import cc.mallet.classify.FeatureConstraintUtil; import cc.mallet.topics.ParallelTopicModel; import cc.mallet.types.Alphabet; import cc.mallet.types.InstanceList; import cc.mallet.util.CommandOption; import cc.mallet.util.MalletLogger; /** * Create "feature constraints" from data for use in GE training. * @author Gregory Druck <a href="mailto:[email protected]">[email protected]</a> */ public class Vectors2FeatureConstraints { private static Logger logger = MalletLogger.getLogger(Vectors2FeatureConstraints.class.getName()); public static CommandOption.File vectorsFile = new CommandOption.File(Vectors2FeatureConstraints.class, "input", "FILENAME", true, null, "Data file used to generate constraints.", null); public static CommandOption.File constraintsFile = new CommandOption.File(Vectors2FeatureConstraints.class, "output", "FILENAME", true, null, "Output file for constraints.", null); public static CommandOption.File featuresFile = new CommandOption.File(Vectors2FeatureConstraints.class, "features-file", "FILENAME", false, null, "File with list of features used to generate constraints.", null); public static CommandOption.File ldaFile = new CommandOption.File(Vectors2FeatureConstraints.class, "lda-file", "FILENAME", false, null, "File with serialized LDA object (if using LDA feature constraint selection).", null); public static CommandOption.Integer numConstraints = new CommandOption.Integer(Vectors2FeatureConstraints.class, "num-constraints", "FILENAME", true, 10, "Number of feature constraints.", null); public static CommandOption.String featureSelection = new CommandOption.String(Vectors2FeatureConstraints.class, "feature-selection", "STRING", true, "infogain | lda", "Method used to choose feature constraints.", null); public static CommandOption.String targets = new CommandOption.String(Vectors2FeatureConstraints.class, "targets", "STRING", true, "none | oracle | heuristic | voted", "Method used to estimate constraint targets.", null); public static CommandOption.Double majorityProb = new CommandOption.Double(Vectors2FeatureConstraints.class, "majority-prob", "DOUBLE", false, 0.9, "Probability for majority labels when using heuristic target estimation.", null); public static void main(String[] args) { CommandOption.process(Vectors2FeatureConstraints.class, args); InstanceList list = InstanceList.load(vectorsFile.value); // Here we will assume that we use all labeled data available. ArrayList<Integer> features = null; HashMap<Integer,ArrayList<Integer>> featuresAndLabels = null; // if a features file was specified, then load features from the file if (featuresFile.wasInvoked()) { if (fileContainsLabels(featuresFile.value)) { // better error message from [email protected] if (targets.value.equals("oracle")) { throw new RuntimeException("with --targets oracle, features file must be unlabeled"); } featuresAndLabels = readFeaturesAndLabelsFromFile(featuresFile.value, list.getDataAlphabet(), list.getTargetAlphabet()); } else { features = readFeaturesFromFile(featuresFile.value, list.getDataAlphabet()); } } // otherwise select features using specified method else { if (featureSelection.value.equals("infogain")) { features = FeatureConstraintUtil.selectFeaturesByInfoGain(list,numConstraints.value); } else if (featureSelection.value.equals("lda")) { try { ObjectInputStream ois = new ObjectInputStream(new FileInputStream(ldaFile.value)); ParallelTopicModel lda = (ParallelTopicModel)ois.readObject(); features = FeatureConstraintUtil.selectTopLDAFeatures(numConstraints.value, lda, list.getDataAlphabet()); } catch (Exception e) { e.printStackTrace(); } } else { throw new RuntimeException("Unsupported value for feature selection: " + featureSelection.value); } } // If the target method is oracle, then we do not need feature "labels". HashMap<Integer,double[]> constraints = null; if (targets.value.equals("none")) { constraints = new HashMap<Integer,double[]>(); for (int fi : features) { constraints.put(fi, null); } } else if (targets.value.equals("oracle")) { constraints = FeatureConstraintUtil.setTargetsUsingData(list, features); } else { // For other methods, we need to get feature labels, as // long as they haven't been already loaded from disk. if (featuresAndLabels == null) { featuresAndLabels = FeatureConstraintUtil.labelFeatures(list,features); for (int fi : featuresAndLabels.keySet()) { logger.info(list.getDataAlphabet().lookupObject(fi) + ": "); for (int li : featuresAndLabels.get(fi)) { logger.info(list.getTargetAlphabet().lookupObject(li) + " "); } } } if (targets.value.equals("heuristic")) { constraints = FeatureConstraintUtil.setTargetsUsingHeuristic(featuresAndLabels,list.getTargetAlphabet().size(),majorityProb.value); } else if (targets.value.equals("voted")) { constraints = FeatureConstraintUtil.setTargetsUsingFeatureVoting(featuresAndLabels,list); } else { throw new RuntimeException("Unsupported value for targets: " + targets.value); } } writeConstraints(constraints,constraintsFile.value,list.getDataAlphabet(),list.getTargetAlphabet()); } private static boolean fileContainsLabels(File file) { String line = ""; try { BufferedReader reader = new BufferedReader(new FileReader(file)); line = reader.readLine().trim(); } catch (Exception e) { e.printStackTrace(); System.exit(1); } String[] split = line.split("\\s+"); if (split.length == 1) { return false; } return true; } private static ArrayList<Integer> readFeaturesFromFile(File file, Alphabet dataAlphabet) { ArrayList<Integer> features = new ArrayList<Integer>(); try { BufferedReader reader = new BufferedReader(new FileReader(file)); String line = reader.readLine(); while (line != null) { line = line.trim(); int featureIndex = dataAlphabet.lookupIndex(line,false); features.add(featureIndex); line = reader.readLine(); } } catch (Exception e) { e.printStackTrace(); System.exit(1); } return features; } private static HashMap<Integer,ArrayList<Integer>> readFeaturesAndLabelsFromFile(File file, Alphabet dataAlphabet, Alphabet targetAlphabet) { HashMap<Integer,ArrayList<Integer>> featuresAndLabels = new HashMap<Integer,ArrayList<Integer>>(); try { BufferedReader reader = new BufferedReader(new FileReader(file)); String line = reader.readLine(); while (line != null) { line = line.trim(); String[] split = line.split("\\s+"); int featureIndex = dataAlphabet.lookupIndex(split[0],false); // better error message from [email protected] if (featureIndex == -1) { throw new RuntimeException("Couldn't find feature '" + split[0] + "' in the data alphabet."); } ArrayList<Integer> labels = new ArrayList<Integer>(); for (int i = 1; i < split.length; i++) { // TODO should these be label names? int li = targetAlphabet.lookupIndex(split[i]); labels.add(li); logger.info("found label " + li); } featuresAndLabels.put(featureIndex,labels); line = reader.readLine(); } } catch (Exception e) { e.printStackTrace(); System.exit(1); } return featuresAndLabels; } private static void writeConstraints(HashMap<Integer,double[]> constraints, File constraintsFile, Alphabet dataAlphabet, Alphabet targetAlphabet) { if (constraints.size() == 0) { logger.warning("No constraints written!"); return; } try { FileWriter writer = new FileWriter(constraintsFile); for (int fi : constraints.keySet()) { writer.write(dataAlphabet.lookupObject(fi) + " "); double[] p = constraints.get(fi); if (p != null) { for (int li = 0; li < p.length; li++) { writer.write(targetAlphabet.lookupObject(li) + ":" + p[li] + " "); } } writer.write("\n"); } writer.close(); } catch (Exception e) { e.printStackTrace(); System.exit(1); } } }
9,088
36.25
149
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/Vectors2Info.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify.tui; import java.util.logging.*; import java.io.*; import cc.mallet.classify.*; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.*; import cc.mallet.types.*; import cc.mallet.util.*; /** * Diagnostic facilities for a vector file. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class Vectors2Info { private static Logger logger = MalletLogger.getLogger(Vectors2Info.class.getName()); static CommandOption.File inputFile = new CommandOption.File (Vectors2Info.class, "input", "FILE", true, new File("-"), "Read the instance list from this file; Using - indicates stdin.", null); static CommandOption.Integer printInfogain = new CommandOption.Integer (Vectors2Info.class, "print-infogain", "N", false, 0, "Print top N words by information gain, sorted.", null); static CommandOption.Boolean printLabels = new CommandOption.Boolean (Vectors2Info.class, "print-labels", "[TRUE|FALSE]", false, false, "Print class labels known to instance list, one per line.", null); static CommandOption.String printMatrix = new CommandOption.String (Vectors2Info.class, "print-matrix", "STRING", false, "sic", "Print word/document matrix in the specified format (a|s)(b|i)(n|w|c|e)", null) { public void parseArg(java.lang.String arg) { if (arg == null) arg = this.defaultValue; //System.out.println("pa arg=" + arg); // sanity check the raw printing options (a la Rainbow) char c0 = arg.charAt(0); char c1 = arg.charAt(1); char c2 = arg.charAt(2); if (arg.length() != 3 || (c0 != 's' && c0 != 'a') || (c1 != 'b' && c1 != 'i') || (c2 != 'n' && c2 != 'w' && c2 != 'c' && c2 != 'e')) { throw new IllegalArgumentException("Illegal argument = " + arg + " in --print-matrix=" +arg); } value = arg; } }; public static void main (String[] args) throws FileNotFoundException, IOException { // Process the command-line options CommandOption.setSummary (Vectors2Info.class, "A tool for printing information about instance lists of feature vectors."); CommandOption.process (Vectors2Info.class, args); // Print some helpful messages for error cases if (args.length == 0) { CommandOption.getList(Vectors2Info.class).printUsage(false); System.exit (-1); } if (false && !inputFile.wasInvoked()) { System.err.println ("You must specify an input instance list, with --input."); System.exit (-1); } // Read the InstanceList InstanceList ilist = InstanceList.load (inputFile.value); if (printLabels.value) { Alphabet la = ilist.getTargetAlphabet (); for (int i = 0; i < la.size(); i++) System.out.println (la.lookupObject (i)); System.out.print ("\n"); } if (printInfogain.value > 0) { InfoGain ig = new InfoGain (ilist); for (int i = 0; i < printInfogain.value; i++) System.out.println (""+i+" "+ig.getObjectAtRank(i)); System.out.print ("\n"); } if (printMatrix.wasInvoked()){ printInstanceList(ilist, printMatrix.value); } } /* print an instance list according to the format string */ private static void printInstanceList(InstanceList ilist, String formatString) { //private static double[] calcFeatureCounts (InstanceList ilist) from FeatureCounts //System.out.println("PIL formatString " + formatString); int numInstances = ilist.size(); int numClasses = ilist.getTargetAlphabet().size(); int numFeatures = ilist.getDataAlphabet().size(); Alphabet dataAlphabet = ilist.getDataAlphabet(); double[] counts = new double[numFeatures]; double count; for (int i = 0; i < ilist.size(); i++) { Instance inst = ilist.get(i); if (!(inst.getData() instanceof FeatureVector)) throw new IllegalArgumentException ("Currently only handles FeatureVector data"); FeatureVector fv = (FeatureVector) inst.getData (); //if (ilist.getInstanceWeight(i) == 0) // continue; System.out.print(inst.getName() + " " + inst.getTarget()); if (formatString.charAt(0) == 'a'){ // all features for (int fvi=0; fvi<numFeatures; fvi++){ printFeature(dataAlphabet.lookupObject(fvi), fvi, fv.value(fvi), formatString); } } else{ // sparse; only features present in vector for (int l = 0; l < fv.numLocations(); l++) { int fvi = fv.indexAtLocation(l); printFeature(dataAlphabet.lookupObject(fvi), fvi, fv.valueAtLocation(l), formatString); //System.out.print(" " + dataAlphabet.lookupObject(j) + " " + ((int) fv.valueAtLocation(j))); } } System.out.println(); } System.out.println(); return; // counts; } /* helper for printInstanceList. prints a single feature within an instance */ private static void printFeature(Object o, int fvi, double featureValue, String formatString) { // print object n,w,c,e char c1 = formatString.charAt(2); if (c1 == 'w') { // word System.out.print(" " + o); } else if (c1 == 'n') { // index of word System.out.print(" " + fvi); } else if (c1 == 'c') { //word and index System.out.print(" " + o + ":" + fvi); } else if (c1 == 'e'){ //no word identity } char c2 = formatString.charAt(1); if (c2 == 'i') { // integer count System.out.print(" " + ((int)(featureValue + .5))); } else if (c2 == 'b') { // boolean present/not present System.out.print(" " + ((featureValue>0.5) ? "1" : "0")); } } }
5,819
32.448276
98
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/Csv2Classify.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify.tui; import java.util.Iterator; import java.util.logging.*; import java.util.regex.*; import java.io.*; import java.nio.charset.Charset; import cc.mallet.classify.*; import cc.mallet.pipe.iterator.*; import cc.mallet.types.*; import cc.mallet.util.*; /** * Command line tool for classifying a sequence of * instances directly from text input, without * creating an instance list. * <p> * * @author David Mimno * @author Gregory Druck */ public class Csv2Classify { private static Logger logger = MalletLogger.getLogger(Csv2Classify.class.getName()); static CommandOption.File inputFile = new CommandOption.File (Csv2Classify.class, "input", "FILE", true, null, "The file containing data to be classified, one instance per line", null); static CommandOption.File outputFile = new CommandOption.File (Csv2Classify.class, "output", "FILE", true, new File("text.vectors"), "Write the instance list to this file; Using - indicates stdout.", null); static CommandOption.String lineRegex = new CommandOption.String (Csv2Classify.class, "line-regex", "REGEX", true, "^(\\S*)[\\s,]*(.*)$", "Regular expression containing regex-groups for label, name and data.", null); static CommandOption.Integer nameOption = new CommandOption.Integer (Csv2Classify.class, "name", "INTEGER", true, 1, "The index of the group containing the instance name.\n" + " Use 0 to indicate that the name field is not used.", null); static CommandOption.Integer dataOption = new CommandOption.Integer (Csv2Classify.class, "data", "INTEGER", true, 2, "The index of the group containing the data.", null); static CommandOption.File classifierFile = new CommandOption.File (Csv2Classify.class, "classifier", "FILE", true, new File("classifier"), "Use the pipe and alphabets from a previously created vectors file.\n" + " Allows the creation, for example, of a test set of vectors that are\n" + " compatible with a previously created set of training vectors", null); static CommandOption.String encoding = new CommandOption.String (Csv2Classify.class, "encoding", "STRING", true, Charset.defaultCharset().displayName(), "Character encoding for input file", null); public static void main (String[] args) throws FileNotFoundException, IOException { // Process the command-line options CommandOption.setSummary (Csv2Classify.class, "A tool for classifying a stream of unlabeled instances"); CommandOption.process (Csv2Classify.class, args); // Print some helpful messages for error cases if (args.length == 0) { CommandOption.getList(Csv2Classify.class).printUsage(false); System.exit (-1); } if (inputFile == null) { throw new IllegalArgumentException ("You must include `--input FILE ...' in order to specify a"+ "file containing the instances, one per line."); } // Read classifier from file Classifier classifier = null; try { ObjectInputStream ois = new ObjectInputStream (new BufferedInputStream(new FileInputStream (classifierFile.value))); classifier = (Classifier) ois.readObject(); ois.close(); } catch (Exception e) { throw new IllegalArgumentException("Problem loading classifier from file " + classifierFile.value + ": " + e.getMessage()); } // Read instances from the file Reader fileReader; if (inputFile.value.toString().equals ("-")) { fileReader = new InputStreamReader (System.in); } else { fileReader = new InputStreamReader(new FileInputStream(inputFile.value), encoding.value); } Iterator<Instance> csvIterator = new CsvIterator (fileReader, Pattern.compile(lineRegex.value), dataOption.value, 0, nameOption.value); Iterator<Instance> iterator = classifier.getInstancePipe().newIteratorFrom(csvIterator); // Write classifications to the output file PrintStream out = null; if (outputFile.value.toString().equals ("-")) { out = System.out; } else { out = new PrintStream(outputFile.value, encoding.value); } // [email protected] // Stop growth on the alphabets. If this is not done and new // features are added, the feature and classifier parameter // indices will not match. classifier.getInstancePipe().getDataAlphabet().stopGrowth(); classifier.getInstancePipe().getTargetAlphabet().stopGrowth(); while (iterator.hasNext()) { Instance instance = iterator.next(); Labeling labeling = classifier.classify(instance).getLabeling(); StringBuilder output = new StringBuilder(); output.append(instance.getName()); for (int location = 0; location < labeling.numLocations(); location++) { output.append("\t" + labeling.labelAtLocation(location)); output.append("\t" + labeling.valueAtLocation(location)); } out.println(output); } if (! outputFile.value.toString().equals ("-")) { out.close(); } } }
5,350
34.437086
102
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/Classifier2Info.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify.tui; import java.util.logging.*; import java.io.*; import cc.mallet.classify.*; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.*; import cc.mallet.types.*; import cc.mallet.util.*; /** * Diagnostic facilities for a classifier. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class Classifier2Info { private static Logger logger = MalletLogger.getLogger(Classifier2Info.class.getName()); static CommandOption.File classifierFile = new CommandOption.File (Classifier2Info.class, "classifier", "FILE", true, new File("-"), "Read the saved classifier from this file.", null); public static void main (String[] args) throws FileNotFoundException, IOException { // Process the command-line options CommandOption.setSummary (Classifier2Info.class, "A tool for printing information about saved classifiers."); CommandOption.process (Classifier2Info.class, args); // Print some helpful messages for error cases if (args.length == 0) { CommandOption.getList(Classifier2Info.class).printUsage(false); System.exit (-1); } // Read in the classifier Classifier classifier; try { ObjectInputStream ois = new ObjectInputStream (new FileInputStream (classifierFile.value)); classifier = (Classifier) ois.readObject(); ois.close(); } catch (Exception e) { e.printStackTrace(); throw new IllegalArgumentException ("Couldn't read classifier "+classifierFile.value); } classifier.print (); } }
1,952
29.515625
94
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/Text2Vectors.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify.tui; import java.util.ArrayList; import java.util.logging.*; import java.util.regex.*; import java.io.*; import java.nio.charset.Charset; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.*; import cc.mallet.types.*; import cc.mallet.util.*; /** * Convert document files into vectors (a persistent instance list). * @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class Text2Vectors { private static Logger logger = MalletLogger.getLogger(Text2Vectors.class.getName()); static CommandOption.SpacedStrings classDirs = new CommandOption.SpacedStrings (Text2Vectors.class, "input", "DIR...", true, null, "The directories containing text files to be classified, one directory per class", null); static CommandOption.File outputFile = new CommandOption.File (Text2Vectors.class, "output", "FILE", true, new File("text.vectors"), "Write the instance list to this file; Using - indicates stdout.", null); static CommandOption.File usePipeFromVectorsFile = new CommandOption.File (Text2Vectors.class, "use-pipe-from", "FILE", true, new File("text.vectors"), "Use the pipe and alphabets from a previously created vectors file. " + "Allows the creation, for example, of a test set of vectors that are " + "compatible with a previously created set of training vectors", null); static CommandOption.Boolean preserveCase = new CommandOption.Boolean (Text2Vectors.class, "preserve-case", "[TRUE|FALSE]", false, false, "If true, do not force all strings to lowercase.", null); static CommandOption.Boolean removeStopWords = new CommandOption.Boolean (Text2Vectors.class, "remove-stopwords", "[TRUE|FALSE]", false, false, "If true, remove a default list of common English \"stop words\" from the text.", null); static CommandOption.File stoplistFile = new CommandOption.File (Text2Vectors.class, "stoplist-file", "FILE", true, null, "Read \"stop words\" from a file, one per line. Implies --remove-stopwords", null); static CommandOption.File extraStopwordsFile = new CommandOption.File (Text2Vectors.class, "extra-stopwords", "FILE", true, null, "Read whitespace-separated words from this file, and add them to either " + " the default English stoplist or the list specified by --stoplist-file.", null); static CommandOption.Boolean skipHeader = new CommandOption.Boolean (Text2Vectors.class, "skip-header", "[TRUE|FALSE]", false, false, "If true, in each document, remove text occurring before a blank line."+ " This is useful for removing email or UseNet headers", null); static CommandOption.Boolean skipHtml = new CommandOption.Boolean (Text2Vectors.class, "skip-html", "[TRUE|FALSE]", false, false, "If true, remove text occurring inside <...>, as in HTML or SGML.", null); static CommandOption.Boolean binaryFeatures = new CommandOption.Boolean (Text2Vectors.class, "binary-features", "[TRUE|FALSE]", false, false, "If true, features will be binary.", null); static CommandOption.IntegerArray gramSizes = new CommandOption.IntegerArray (Text2Vectors.class, "gram-sizes", "INTEGER,[INTEGER,...]", true, new int[] {1}, "Include among the features all n-grams of sizes specified. "+ "For example, to get all unigrams and bigrams, use --gram-sizes 1,2. "+ "This option occurs after the removal of stop words, if removed.", null); static CommandOption.Boolean keepSequence = new CommandOption.Boolean (Text2Vectors.class, "keep-sequence", "[TRUE|FALSE]", false, false, "If true, final data will be a FeatureSequence rather than a FeatureVector.", null); static CommandOption.Boolean keepSequenceBigrams = new CommandOption.Boolean (Text2Vectors.class, "keep-sequence-bigrams", "[TRUE|FALSE]", false, false, "If true, final data will be a FeatureSequenceWithBigrams rather than a FeatureVector.", null); static CommandOption.Boolean saveTextInSource = new CommandOption.Boolean (Text2Vectors.class, "save-text-in-source", "[TRUE|FALSE]", false, false, "If true, save original text of document in source.", null); static CommandOption.ObjectFromBean stringPipe = new CommandOption.ObjectFromBean (Text2Vectors.class, "string-pipe", "Pipe constructor", true, null, "Java code for the constructor of a Pipe to be run as soon as input becomes a CharSequence", null); static CommandOption.ObjectFromBean tokenPipe = new CommandOption.ObjectFromBean (Text2Vectors.class, "token-pipe", "Pipe constructor", true, null, "Java code for the constructor of a Pipe to be run as soon as input becomes a TokenSequence", null); static CommandOption.ObjectFromBean featureVectorPipe = new CommandOption.ObjectFromBean (Text2Vectors.class, "fv-pipe", "Pipe constructor", true, null, "Java code for the constructor of a Pipe to be run as soon as input becomes a FeatureVector", null); static CommandOption.String encoding = new CommandOption.String (Text2Vectors.class, "encoding", "STRING", true, Charset.defaultCharset().displayName(), "Character encoding for input file", null); static CommandOption.String tokenRegex = new CommandOption.String (Text2Vectors.class, "token-regex", "REGEX", true, CharSequenceLexer.LEX_ALPHA.toString(), "Regular expression used for tokenization.\n" + " Example: \"[\\p{L}\\p{N}_]+|[\\p{P}]+\" (unicode letters, numbers and underscore OR all punctuation) ", null); static CommandOption.Boolean printOutput = new CommandOption.Boolean (Text2Vectors.class, "print-output", "[TRUE|FALSE]", false, false, "If true, print a representation of the processed data\n" + " to standard output. This option is intended for debugging.", null); public static void main (String[] args) throws FileNotFoundException, IOException { // Process the command-line options CommandOption.setSummary (Text2Vectors.class, "A tool for creating instance lists of FeatureVectors or FeatureSequences from text documents.\n"); CommandOption.process (Text2Vectors.class, args); //String[] classDirs = CommandOption.process (Text2Vectors.class, args); // Print some helpful messages for error cases if (args.length == 0) { CommandOption.getList(Text2Vectors.class).printUsage(false); System.exit (-1); } if (classDirs.value.length == 0) { throw new IllegalArgumentException ("You must include --input DIR1 DIR2 ...' in order to specify a " + "list of directories containing the documents for each class."); } // Remove common prefix from all the input class directories int commonPrefixIndex = Strings.commonPrefixIndex (classDirs.value); logger.info ("Labels = "); File[] directories = new File[classDirs.value.length]; for (int i = 0; i < classDirs.value.length; i++) { directories[i] = new File (classDirs.value[i]); if (commonPrefixIndex < classDirs.value.length) { logger.info (" "+classDirs.value[i].substring(commonPrefixIndex)); } else { logger.info (" "+classDirs.value[i]); } } Pipe instancePipe; InstanceList previousInstanceList = null; if (usePipeFromVectorsFile.wasInvoked()) { previousInstanceList = InstanceList.load (usePipeFromVectorsFile.value); instancePipe = previousInstanceList.getPipe(); } else { // Build a new pipe // Create a list of pipes that will be added to a SerialPipes object later ArrayList<Pipe> pipeList = new ArrayList<Pipe>(); // Convert the "target" object into a numeric index // into a LabelAlphabet. pipeList.add(new Target2Label()); // The "data" field is currently a filename. Save it as "source". pipeList.add( new SaveDataInSource() ); // Set "data" to the file's contents. "data" is now a String. pipeList.add( new Input2CharSequence(encoding.value) ); // Optionally save the text to "source" -- not recommended if memory is scarce. if (saveTextInSource.wasInvoked()) { pipeList.add( new SaveDataInSource() ); } // Allow the user to specify an arbitrary Pipe object // that operates on Strings if (stringPipe.wasInvoked()) { pipeList.add( (Pipe) stringPipe.value ); } // Remove all content before the first empty line. // Useful for email and usenet news posts. if (skipHeader.value) { pipeList.add( new CharSubsequence(CharSubsequence.SKIP_HEADER) ); } // Remove HTML tags. Suitable for SGML and XML. if (skipHtml.value) { pipeList.add( new CharSequenceRemoveHTML() ); } // // Tokenize the input: first compile the tokenization pattern // Pattern tokenPattern = null; if (keepSequenceBigrams.value) { // We do not want to record bigrams across punctuation, // so we need to keep non-word tokens. tokenPattern = CharSequenceLexer.LEX_NONWHITESPACE_CLASSES; } else { // Otherwise, try to compile the regular expression pattern. try { tokenPattern = Pattern.compile(tokenRegex.value); } catch (PatternSyntaxException pse) { throw new IllegalArgumentException("The token regular expression (" + tokenRegex.value + ") was invalid: " + pse.getMessage()); } } // Add the tokenizer pipeList.add(new CharSequence2TokenSequence(tokenPattern)); // Allow user to specify an arbitrary Pipe object // that operates on TokenSequence objects. if (tokenPipe.wasInvoked()) { pipeList.add( (Pipe) tokenPipe.value ); } if (! preserveCase.value()) { pipeList.add(new TokenSequenceLowercase()); } if (keepSequenceBigrams.value) { // Remove non-word tokens, but record the fact that they // were there. pipeList.add(new TokenSequenceRemoveNonAlpha(true)); } // Stopword removal. if (stoplistFile.wasInvoked()) { // The user specified a new list TokenSequenceRemoveStopwords stopwordFilter = new TokenSequenceRemoveStopwords(stoplistFile.value, encoding.value, false, // don't include default list false, keepSequenceBigrams.value); if (extraStopwordsFile.wasInvoked()) { stopwordFilter.addStopWords(extraStopwordsFile.value); } pipeList.add(stopwordFilter); } else if (removeStopWords.value) { // The user did not specify a new list, so use the default // built-in English list, possibly adding extra words. TokenSequenceRemoveStopwords stopwordFilter = new TokenSequenceRemoveStopwords(false, keepSequenceBigrams.value); if (extraStopwordsFile.wasInvoked()) { stopwordFilter.addStopWords(extraStopwordsFile.value); } pipeList.add(stopwordFilter); } // gramSizes is an integer array, with default value [1]. // Check if we have a non-default value. if (! (gramSizes.value.length == 1 && gramSizes.value[0] == 1)) { pipeList.add( new TokenSequenceNGrams(gramSizes.value) ); } // So far we have a sequence of Token objects that contain // String values. Look these up in an alphabet and store integer IDs // ("features") instead of Strings. if (keepSequenceBigrams.value) { pipeList.add( new TokenSequence2FeatureSequenceWithBigrams() ); } else { pipeList.add( new TokenSequence2FeatureSequence() ); } // For many applications, we do not need to preserve the sequence of features, // only the number of times times a feature occurs. if (! (keepSequence.value || keepSequenceBigrams.value)) { pipeList.add( new FeatureSequence2AugmentableFeatureVector(binaryFeatures.value) ); } // Allow users to specify an arbitrary Pipe object that operates on // feature vectors. if (featureVectorPipe.wasInvoked()) { pipeList.add( (Pipe) featureVectorPipe.value ); } if (printOutput.value) { pipeList.add(new PrintInputAndTarget()); } instancePipe = new SerialPipes(pipeList); } InstanceList instances = new InstanceList (instancePipe); boolean removeCommonPrefix = true; instances.addThruPipe (new FileIterator (directories, FileIterator.STARTING_DIRECTORIES, removeCommonPrefix)); // write vector file ObjectOutputStream oos; if (outputFile.value.toString().equals ("-")) { oos = new ObjectOutputStream(System.out); } else { oos = new ObjectOutputStream(new FileOutputStream(outputFile.value)); } oos.writeObject(instances); oos.close(); // *rewrite* vector file used as source of pipe in case we changed the alphabet(!) if (usePipeFromVectorsFile.wasInvoked()) { logger.info(" rewriting previous instance list, with ID = " + previousInstanceList.getPipe().getInstanceId()); oos = new ObjectOutputStream(new FileOutputStream(usePipeFromVectorsFile.value)); oos.writeObject(previousInstanceList); oos.close(); } } }
13,251
38.440476
117
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/tui/SvmLight2Classify.java
/* Copyright (C) 2010 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.classify.tui; import java.util.ArrayList; import java.util.Iterator; import java.util.logging.*; import java.util.regex.*; import java.io.*; import java.nio.charset.Charset; import cc.mallet.classify.*; import cc.mallet.pipe.Pipe; import cc.mallet.pipe.SerialPipes; import cc.mallet.pipe.SvmLight2FeatureVectorAndLabel; import cc.mallet.pipe.iterator.*; import cc.mallet.types.*; import cc.mallet.util.*; /** * Command line tool for classifying a sequence of instances directly from text * input, without creating an instance list. * <p> * * @author David Mimno * @author Gregory Druck * @author Siddhartha Jonnalagadda */ public class SvmLight2Classify { private static Logger logger = MalletLogger.getLogger(SvmLight2Classify.class.getName()); static CommandOption.File inputFile = new CommandOption.File( SvmLight2Classify.class, "input", "FILE", true, null, "The file containing data to be classified, one instance per line", null); static CommandOption.File outputFile = new CommandOption.File( SvmLight2Classify.class, "output", "FILE", true, new File("text.vectors"), "Write the instance list to this file; Using - indicates stdout.", null); static CommandOption.String lineRegex = new CommandOption.String( SvmLight2Classify.class, "line-regex", "REGEX", true, "^(\\S*)[\\s,]*(.*)$", "Regular expression containing regex-groups for label, name and data.", null); static CommandOption.Integer nameOption = new CommandOption.Integer( SvmLight2Classify.class, "name", "INTEGER", true, 1, "The index of the group containing the instance name.\n" + " Use 0 to indicate that the name field is not used.", null); static CommandOption.Integer dataOption = new CommandOption.Integer( SvmLight2Classify.class, "data", "INTEGER", true, 2, "The index of the group containing the data.", null); static CommandOption.File classifierFile = new CommandOption.File( SvmLight2Classify.class, "classifier", "FILE", true, new File("classifier"), "Use the pipe and alphabets from a previously created vectors file.\n" + " Allows the creation, for example, of a test set of vectors that are\n" + " compatible with a previously created set of training vectors", null); static CommandOption.String encoding = new CommandOption.String( SvmLight2Classify.class, "encoding", "STRING", true, Charset.defaultCharset().displayName(), "Character encoding for input file", null); public static void main(String[] args) throws FileNotFoundException, IOException { // Process the command-line options CommandOption.setSummary(SvmLight2Classify.class, "A tool for classifying a stream of unlabeled instances"); CommandOption.process(SvmLight2Classify.class, args); // Print some helpful messages for error cases if (args.length == 0) { CommandOption.getList(SvmLight2Classify.class).printUsage(false); System.exit(-1); } if (inputFile == null) { throw new IllegalArgumentException( "You must include `--input FILE ...' in order to specify a" + "file containing the instances, one per line."); } // Read classifier from file Classifier classifier = null; try { ObjectInputStream ois = new ObjectInputStream(new BufferedInputStream( new FileInputStream(classifierFile.value))); classifier = (Classifier) ois.readObject(); ois.close(); } catch (Exception e) { throw new IllegalArgumentException( "Problem loading classifier from file " + classifierFile.value + ": "+ e.getMessage()); } Pipe instancePipe; // Build a new pipe ArrayList<Pipe> pipeList = new ArrayList<Pipe>(); pipeList.add(new SvmLight2FeatureVectorAndLabel()); instancePipe = new SerialPipes(pipeList); InstanceList instances = new InstanceList(instancePipe); Reader fileReader; if (inputFile.equals("-")) { fileReader = new InputStreamReader(System.in); } else { fileReader = new InputStreamReader(new FileInputStream(inputFile.value),encoding.value); } // Read instances from the file instances.addThruPipe(new SelectiveFileLineIterator(fileReader, "^\\s*#.+")); Iterator<Instance> iterator = instances.iterator(); // Write classifications to the output file PrintStream out = null; if (outputFile.value.toString().equals("-")) { out = System.out; } else { out = new PrintStream(outputFile.value, encoding.value); } // [email protected] // Stop growth on the alphabets. If this is not done and new // features are added, the feature and classifier parameter // indices will not match. classifier.getInstancePipe().getDataAlphabet().stopGrowth(); classifier.getInstancePipe().getTargetAlphabet().stopGrowth(); while (iterator.hasNext()) { Instance instance = iterator.next(); Labeling labeling = classifier.classify(instance).getLabeling(); StringBuilder output = new StringBuilder(); output.append(instance.getName()); for (int location = 0; location < labeling.numLocations(); location++) { output.append("\t" + labeling.labelAtLocation(location)); output.append("\t" + labeling.valueAtLocation(location)); } out.println(output); } if (!outputFile.value.toString().equals("-")) { out.close(); } } }
5,623
34.821656
95
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/evaluate/Graph2.java
/* Copyright (C) 2002 Department of Computer Science, University of Massachusetts, Amherst This file is part of "MALET" (MAchine LEarning Toolkit). http://www.cs.umass.edu/~mccallum/malet This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @author Aron Culotta <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify.evaluate; import java.awt.*; import java.util.*; /** * Methods for a 2-D graph */ public class Graph2 extends Graph { int increment; int position; public Graph2(String title, int min, int max, String xLabel, String yLabel) { super(title, min, max, xLabel, yLabel); } public void paint(Graphics g) { super.paint(g); Color temp = g.getColor(); for (int ii = 0; ii < items.size(); ii++) { Vector tempV = new Vector ((Vector)items.elementAt(ii)); GraphItem firstItem = (GraphItem)tempV.firstElement(); int firstAdjustedValue = bottom - (((firstItem.value - min) *(bottom - top))/(max - min)); increment = (right - left)/(tempV.size() - 1); position = left; // g.setColor(firstItem.color); g.setColor(legend.color(ii)); // get color for this data series g.drawString(firstItem.title, position - fm.stringWidth(firstItem.title), firstAdjustedValue - 2); g.fillOval(position - 2, firstAdjustedValue - 2, 4, 4); //g.setColor(temp); for (int i = 0; i < tempV.size() - 1; i++) { GraphItem thisItem = (GraphItem)tempV.elementAt(i); int thisAdjustedValue = bottom - (((thisItem.value - min)* (bottom - top))/(max - min)); GraphItem nextItem = (GraphItem)tempV.elementAt(i+1); int nextAdjustedValue = bottom - (((nextItem.value - min)* (bottom - top))/(max - min)); g.drawLine(position, thisAdjustedValue, position+=increment, nextAdjustedValue); // g.setColor(nextItem.color); if (nextAdjustedValue < thisAdjustedValue) g.drawString(nextItem.title, position - fm.stringWidth(nextItem.title), nextAdjustedValue + titleHeight + 4); else g.drawString(nextItem.title, position - fm.stringWidth(nextItem.title), nextAdjustedValue - 4); g.fillOval(position - 2, nextAdjustedValue - 2, 4, 4); // g.setColor(temp); } g.setColor(temp); } } }
3,843
33.945455
90
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/evaluate/GraphItem.java
/* Copyright (C) 2002 Department of Computer Science, University of Massachusetts, Amherst This file is part of "MALET" (MAchine LEarning Toolkit). http://www.cs.umass.edu/~mccallum/malet This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @author Aron Culotta <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify.evaluate; import java.awt.*; /** * Holds data for a point on a graph */ public class GraphItem { String title; int value; Color color; public GraphItem(String title, int value, Color color) { this.title = title; this.value = value; this.color = color; } }
2,152
35.491525
90
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/evaluate/AccuracyCoverage.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Aron Culotta <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify.evaluate; import java.awt.*; import java.awt.event.*; import javax.swing.*; import cc.mallet.classify.Classification; import cc.mallet.classify.Classifier; import cc.mallet.classify.Trial; import cc.mallet.classify.evaluate.GraphItem; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.LabelVector; import cc.mallet.util.MalletLogger; import cc.mallet.util.PrintUtilities; import java.util.*; import java.util.logging.*; import java.text.DecimalFormat; /** * Methods for calculating and displaying the accuracy v. * coverage data for a Trial */ public class AccuracyCoverage implements ActionListener { private static Logger logger = MalletLogger.getLogger(AccuracyCoverage.class.getName()); static final int DEFAULT_NUM_BUCKETS = 20; static final int DEFAULT_MAX_X = 100; private ArrayList classifications; private double [] accuracyValues; private int numBuckets; private double step; private Graph2 graph; private JFrame frame; /** * Constructs object, sorts classifications, and creates * accuracyValues array * @param t trial to get data from * @param numBuckets number of x-axis measurements to find accuracy */ public AccuracyCoverage(Trial t, int numBuckets, String title, String dataName) { this.classifications = t; this.numBuckets = numBuckets; this.step = (double)DEFAULT_MAX_X/numBuckets; this.accuracyValues = new double[numBuckets]; this.frame = null; logger.info("Constructing AccCov with " + this.classifications.size()); sortClassifications(); /* for(int i=0; i<classifications.size(); i++) { Classification c = (Classification)this.classifications.get(i); LabelVector distr = c.getLabelVector(); System.out.println(distr.getBestValue()); } */ createAccuracyArray(); this.graph = new Graph2( title, 0, 100, "Coverage", "Accuracy"); addDataToGraph(this.accuracyValues, numBuckets, dataName); } public AccuracyCoverage(Trial t, String title, String name) { this(t, DEFAULT_NUM_BUCKETS, title, name); } public AccuracyCoverage(Trial t, String title) { this(t, DEFAULT_NUM_BUCKETS, title, "unnamed"); } public AccuracyCoverage(Classifier C, InstanceList ilist, String title) { this(new Trial(C, ilist), DEFAULT_NUM_BUCKETS, title, "unnamed"); } public AccuracyCoverage(Classifier C, InstanceList ilist, int numBuckets, String title) { this(new Trial(C, ilist), numBuckets, title, "unnamed"); } /** * Finds the "area under the acc/cov curve" * steps by one percentage point and calcs area * of trapezoid */ public double cumulativeAccuracy() { double area = 0.0; for(int i=1; i<100; i++) { double leftAccuracy = accuracyAtCoverage((double)i/100); double rightAccuracy = accuracyAtCoverage((double)(i+1)/100); area += .5*(leftAccuracy + rightAccuracy); } return area; } /** * Creates array of accuracy values for coverage * at each step as defined by numBuckets. */ public void createAccuracyArray() { // System.out.println("Creating accuracyArray. Step= "+step); for(int i=0 ; i<numBuckets; i++) { accuracyValues[i] = accuracyAtCoverage(step *(double)(i+1)/100.0); } } /** * accuracy at a given coverage percentage * @param cov coverage percentage * @return accuracy value */ public double accuracyAtCoverage(double cov) { assert(cov <= 1 && cov > 0); int numTrials = (int)(Math.round((double)classifications.size()*cov)); int numCorrect = 0; // System.out.println("NumTrials="+numTrials); for(int i= classifications.size()-1; i >= classifications.size()-numTrials; i--) { Classification temp = (Classification)classifications.get(i); if(temp.bestLabelIsCorrect()) numCorrect++; } // System.out.println("Accuracy at cov "+cov+" is "+ //(double)numCorrect/numTrials); return((double)numCorrect/numTrials); } /** * Sort classifications ArrayList * by winner's value */ public void sortClassifications() { Collections.sort(classifications, new ClassificationComparator()); } public void addDataToGraph(double [] accValues, int nBuckets, String name) { Vector values = new Vector(nBuckets); for(int i=0; i<nBuckets; i++) { GraphItem temp = new GraphItem("", (int)(accValues[i]*100), Color.black); values.add(temp); } logger.info("Sending "+values.size()+" elements to graph"); this.graph.addItemVector(values, name); } /** * Displays the accuracy v. coverage graph */ public void displayGraph() { Vector values = new Vector(this.numBuckets); JButton printButton = new JButton("Print"); frame = new JFrame("Graph"); DecimalFormat df = new DecimalFormat(); printButton.addActionListener(this); frame.addWindowListener (new WindowAdapter() { public void windowClosing(WindowEvent e) { System.exit(0); } } ); // Get content pane Container pane = frame.getContentPane(); // Set layout manager pane.setLayout( new FlowLayout() ); assert(graph!= null); // make sure we've got data in the graph // Add to pane pane.add( graph ); pane.add( printButton ); frame.pack(); // Center the frame Toolkit toolkit = Toolkit.getDefaultToolkit(); // Get the current screen size Dimension scrnsize = toolkit.getScreenSize(); // Get the frame size Dimension framesize= frame.getSize(); // Set X,Y location frame.setLocation ( (int) (scrnsize.getWidth() - frame.getWidth() ) / 2 , (int) (scrnsize.getHeight() - frame.getHeight()) / 2); frame.setVisible(true); } public void actionPerformed(ActionEvent event) { PrintUtilities.printComponent(graph); } public void addTrial(Trial t, String name) { addTrial(t, DEFAULT_NUM_BUCKETS, name); } public void addTrial(Trial t, int nBuckets, String name) { AccuracyCoverage newData = new AccuracyCoverage(t, nBuckets, "untitled", name); double [] accValues = newData.accuracyValues(); addDataToGraph(accValues, nBuckets, name); } public double[] accuracyValues() { return this.accuracyValues; } public class ClassificationComparator implements Comparator { public final int compare (Object a, Object b) { LabelVector x = (LabelVector) (((Classification)a).getLabelVector()); LabelVector y = (LabelVector) (((Classification)b).getLabelVector()); double difference = x.getBestValue() - y.getBestValue(); int toReturn = 0; if(difference > 0) toReturn = 1; else if (difference < 0) toReturn = -1; return(toReturn); } } }
7,196
25.459559
89
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/evaluate/ConfusionMatrix.java
/* Copyright (C) 2002 Dept. of Computer Science, Univ. of Massachusetts, Amherst This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This program toolkit free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. For more details see the GNU General Public License and the file README-LEGAL. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify.evaluate; import java.util.ArrayList; import java.util.HashMap; import java.util.logging.*; import java.text.*; import cc.mallet.classify.Classification; import cc.mallet.classify.Trial; import cc.mallet.types.Instance; import cc.mallet.types.Label; import cc.mallet.types.LabelAlphabet; import cc.mallet.types.LabelVector; import cc.mallet.types.Labeling; import cc.mallet.types.MatrixOps; import cc.mallet.util.MalletLogger; /** * Calculates and prints confusion matrix, accuracy, * and precision for a given clasification trial. */ public class ConfusionMatrix { private static Logger logger = MalletLogger.getLogger(ConfusionMatrix.class.getName()); int numClasses; /** * the list of classifications from the trial */ ArrayList classifications; /** * 2-d confiusion matrix */ int[][] values; Trial trial; /** * Constructs matrix and calculates values * @param t the trial to build matrix from */ public ConfusionMatrix(Trial t) { this.trial = t; this.classifications = t; Labeling tempLabeling = ((Classification)classifications.get(0)).getLabeling(); this.numClasses = tempLabeling.getLabelAlphabet().size(); values = new int[numClasses][numClasses]; for(int i=0; i < classifications.size(); i++) { LabelVector lv = ((Classification)classifications.get(i)).getLabelVector(); Instance inst = ((Classification)classifications.get(i)).getInstance(); int bestIndex = lv.getBestIndex(); int correctIndex = inst.getLabeling().getBestIndex(); assert(correctIndex != -1); //System.out.println("Best index="+bestIndex+". Correct="+correctIndex); values[correctIndex][bestIndex]++; } } /** Return the count at row i (true) , column j (predicted) */ double value(int i, int j) { assert(i >= 0 && j >= 0 && i < numClasses && j < numClasses); return values[i][j]; } static private void appendJustifiedInt (StringBuffer sb, int i, boolean zeroDot) { if (i < 100) sb.append (' '); if (i < 10) sb.append (' '); if (i == 0 && zeroDot) sb.append ("."); else sb.append (""+i); } public String toString () { StringBuffer sb = new StringBuffer (); int maxLabelNameLength = 0; LabelAlphabet labelAlphabet = trial.getClassifier().getLabelAlphabet(); for (int i = 0; i < numClasses; i++) { int len = labelAlphabet.lookupLabel(i).toString().length(); if (maxLabelNameLength < len) maxLabelNameLength = len; } sb.append ("Confusion Matrix, row=true, column=predicted accuracy="+trial.getAccuracy()+"\n"); for (int i = 0; i < maxLabelNameLength-5+4; i++) sb.append (' '); sb.append ("label"); for (int c2 = 0; c2 < Math.min(10,numClasses); c2++) sb.append (" "+c2); for (int c2 = 10; c2 < numClasses; c2++) sb.append (" "+c2); sb.append (" |total\n"); for (int c = 0; c < numClasses; c++) { appendJustifiedInt (sb, c, false); String labelName = labelAlphabet.lookupLabel(c).toString(); for (int i = 0; i < maxLabelNameLength-labelName.length(); i++) sb.append (' '); sb.append (" "+labelName+" "); for (int c2 = 0; c2 < numClasses; c2++) { appendJustifiedInt (sb, values[c][c2], true); sb.append (' '); } sb.append (" |"+ MatrixOps.sum(values[c])); sb.append ('\n'); } return sb.toString(); } /** * Returns the precision of this predicted class */ public double getPrecision (int predictedClassIndex) { int total = 0; for (int trueClassIndex=0; trueClassIndex < this.numClasses; trueClassIndex++) { total += values[trueClassIndex][predictedClassIndex]; } if (total == 0) return 0.0; else return (double) (values[predictedClassIndex][predictedClassIndex]) / total; } /** * Returns percent of time that class2 is true class when * class1 is predicted class * */ public double getConfusionBetween (int class1, int class2) { int total = 0; for (int trueClassIndex=0; trueClassIndex < this.numClasses; trueClassIndex++) { total += values[trueClassIndex][class1]; } if (total == 0) return 0.0; else return (double) (values[class2][class1]) / total; } /** * Returns the percentage of instances with * true label = classIndex */ public double getClassPrior (int classIndex) { int sum= 0; for(int i=0; i < numClasses; i++) sum += values[classIndex][i]; return (double)sum / classifications.size(); } /** * prints to stdout the confusion matrix, * class frequency, precision, and recall */ /* public void print() { double totalPrecision = 0; double totalRecall = 0; double totalF1 = 0; HashMap index2class = new HashMap(); LabelVector lv = ((Classification)classifications.get(0)).getLabelVector(); DecimalFormat df = new DecimalFormat("###.##"); int [] numInstances = new int[this.numClasses]; for(int i=0; i<this.numClasses; i++){ int count = 0; for(int j=0; j<this.numClasses; j++) count += values[i][j]; numInstances[i] = count; String label = lv.labelAtLocation(i).toString(); System.out.println("index "+i+": "+label+ " "+count+" instances "+ df.format(100*(double)count/classifications.size()) +"%"); index2class.put (new Integer (i), label); } System.out.println("Confusion Matrix"); for(int i=0; i<this.numClasses; i++){ for(int j=0; j<this.numClasses; j++) System.out.print(values[j][i]+"\t\t"); System.out.println(""); } for(int i=0; i<this.numClasses; i++){ double recall = 100.0*(double)values[j][j]/numInstances[i]; double precision; int rowCount = 0; for(int j=0; j<this.numClasses; j++) rowCount += values[j][i]; if (rowCount == 0) precision = 0; else precision = 100.0*(double)values[j][j] / rowCount; double f1; if (precision + recall == 0.0) f1 = 0; else f1 = 2 * precision * recall / (precision + recall); System.out.println("Class " + (String)index2class.get(new Integer (i))); System.out.println("F1="+df.format(f1)+"%"); System.out.println("Recall="+df.format(recall)+"%"); System.out.println("Precision="+df.format(precision)+"%"); totalPrecision += precision; totalRecall += recall; totalF1 += f1; } int numCorrect = 0; int totalInstances = 0; for(int i=0; i<this.numClasses; i++) { numCorrect += values[j][j]; totalInstances+=numInstances[i]; } System.out.println("Overall Accuracy="+ df.format(100.0*(double)numCorrect/totalInstances)+"%"); System.out.println ("Average F1: " + (totalF1 / this.numClasses) + "\nAverage Precision: " + (totalPrecision / this.numClasses) + "\nAverage Recall: " + (totalRecall / this.numClasses)); } */ }
7,781
27.610294
97
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/classify/evaluate/Graph.java
/* Copyright (C) 2002 Department of Computer Science, University of Massachusetts, Amherst This file is part of "MALET" (MAchine LEarning Toolkit). http://www.cs.umass.edu/~mccallum/malet This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF MASSACHUSETTS AND OTHER CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @author Aron Culotta <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.classify.evaluate; import java.awt.*; import java.util.*; import cc.mallet.classify.evaluate.*; /** * Framework for standard graph. Can hold up to N data series */ public class Graph extends Canvas { int top; int bottom; int left; int right; int titleHeight; int labelWidth; FontMetrics fm; int padding = 4; String title; String xLabel; String yLabel; int xLabelHeight; int yLabelWidth; int min; int max; int xmin; int xmax; Legend legend; Vector items; //2d vector - one column for each series /** * Creates a Graph object * @param title Title of graph * @param min minimum y value * @param max maximum y value * @param xLabel label for x axis * @param yLabel label for y axis */ public Graph(String title, int min, int max, String xLabel, String yLabel) { this.title = title; this.min = min; this.max = max; this.xLabel = xLabel; this.yLabel = yLabel; this.legend = new Legend(); items = new Vector(); } /** * Set bounds of canvas */ public void setBounds(int x, int y, int width, int height) { super.setBounds(x, y, width, height); fm = getFontMetrics(getFont()); titleHeight = fm.getHeight(); yLabelWidth = fm.stringWidth(yLabel); xLabelHeight = fm.getHeight(); labelWidth = Math.max(fm.stringWidth(new Integer(min).toString()), fm.stringWidth(new Integer(max).toString())) + 2; top = padding + titleHeight; bottom = getSize().height - padding - xLabelHeight - fm.getHeight(); left = padding + yLabelWidth; right = getSize().width - padding; } /** * Paint the graph outline */ public void paint(Graphics g) { // set xmin, xmax based on item vector // TODO: make this user defined xmin=0; xmax=100; // draw the title fm = getFontMetrics(getFont()); g.drawString(title, (getSize().width - fm.stringWidth(title))/2, top - padding); // draw the labels g.drawString(yLabel, 0, getSize().height/2); g.drawString(xLabel, (getSize().width - fm.stringWidth(xLabel))/2 ,bottom + fm.getHeight()); // draw the max and min values g.drawString(new Integer(min).toString(), left - padding - fm.stringWidth(new Integer(min).toString()), bottom); g.drawString(new Integer(max).toString(), left - padding - fm.stringWidth(new Integer(max).toString()), top + titleHeight); g.drawString(new Integer(xmin).toString(), left, bottom + fm.getHeight()); g.drawString(new Integer(xmax).toString(), right - fm.stringWidth(new Integer(xmax).toString()), bottom + fm.getHeight()); // draw the vertical and horizontal lines g.drawLine(left, top, left, bottom); g.drawLine(left, bottom, right, bottom); // draw legend int legendHeight = fm.getHeight() * legend.size(); int legendTop = bottom - legendHeight - padding - 8; g.drawRect((getSize().width/2)-padding, legendTop-fm.getHeight()-padding, fm.stringWidth(legend.longestString())+2*padding, legendHeight+2*padding); for(int i=0; i<legend.size(); i++) { g.setColor(legend.color(i)); g.drawString(legend.name(i), (getSize().width)/2, legendTop + i*fm.getHeight()); } } public Dimension getPreferredSize() { return(new Dimension(500, 400)); } /** * Adds a new data series * @param newItems Vector of GraphItems */ public void addItemVector(Vector newItems, String name) { items.add(newItems); legend.add(name); } public void addItem(String name, int value, Color col) { items.addElement(new GraphItem(name, value, col)); } public void addItem(String name, int value) { items.addElement(new GraphItem(name, value, Color.black)); } public void removeItem(String name) { for (int i = 0; i < items.size(); i++) { if (((GraphItem)items.elementAt(i)).title.equals(name)) items.removeElementAt(i); } } public class Legend { Vector series; Vector colors; public Legend() { series = new Vector(); colors = new Vector(); } public void add(String name) { series.add(name); if(colors.isEmpty()) //first item added colors.add(Color.black); else { float[] compArray = new float[4]; Color prevColor = (Color)colors.get(colors.size()-1); // colors.add(prevColor.brighter()); compArray = prevColor.getRGBComponents(compArray); compArray[3] = compArray[3] * (float).5; // halve alpha value colors.add(new Color(compArray[0], compArray[1], compArray[2], compArray[3])); } } public Color color(int i) { return (Color)colors.get(i); } public String name(int i) { return (String)series.get(i); } public int size() { return colors.size(); } public String longestString() { String longest = new String(""); // init to shortest string for(int i=0; i<series.size(); i++) { String temp = (String) series.get(i); if(temp.length() > longest.length()) longest = temp; } return longest; } } }
6,824
27.08642
94
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/DirectoryFilter.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.io.*; public class DirectoryFilter implements FileFilter { public boolean accept (File f) { return f.isDirectory (); } }
687
25.461538
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/FileUtils.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.util; import java.io.*; import java.util.ArrayList; import java.util.zip.GZIPInputStream; import java.util.zip.GZIPOutputStream; /** * Contains static utilities for manipulating files. * * Created: Thu Nov 20 15:14:16 2003 * * @author <a href="mailto:[email protected]">Charles Sutton</a> * @version $Id: FileUtils.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $ */ public class FileUtils { private FileUtils() {} // All static methods /** * Serializes an object to a file, masking out annoying exceptions. * Any IO exceptions are caught, and printed to standard error. * Consider using {@link #writeGzippedObject(java.io.File, java.io.Serializable)} * instead, for that method will compress the serialized file, and it'll still * be reaoable by {@link #readObject}. * @param f File to write to * @param obj Object to serialize * @see #writeGzippedObject(java.io.File, java.io.Serializable) */ public static void writeObject (File f, Serializable obj) { try { ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(f)); oos.writeObject(obj); oos.close(); } catch (IOException e) { System.err.println("Exception writing file " + f + ": " + e); } } /** * Reads a Serialized object, which may or may not be zipped. * Guesses from the file name whether to decompress or not. * @param f File to read data from * @return A deserialized object. */ public static Object readObject (File f) { String fname = f.getName (); if (fname.endsWith (".gz")) { return readGzippedObject (f); } else { return readUnzippedObject (f); } } /** * Reads a serialized object from a file. * You probably want to use {@link #readObject} instead, because that method will automatically guess * from the extension whether the file is compressed, and call this method if necessary. * @param f File to read object from * @see #readObject */ public static Object readUnzippedObject (File f) { try { ObjectInputStream ois = new ObjectInputStream(new FileInputStream(f)); Object obj = ois.readObject(); ois.close (); return obj; } catch (IOException e) { throw new RuntimeException (e); } catch (ClassNotFoundException e) { throw new RuntimeException (e); } } /** * Reads every line from a given text file. * @param f Input file. * @return String[] Array containing each line in <code>f</code>. */ public static String[] readFile (File f) throws IOException { BufferedReader in = new BufferedReader(new FileReader (f)); ArrayList list = new ArrayList (); String line; while ((line = in.readLine()) != null) list.add (line); return (String[]) list.toArray(new String[0]); } /** * Creates a file, making sure that its name is unique. * The file will be created as if by <tt>new File(dir, prefix+i+extension)</tt>, * where i is an integer chosen such that the returned File does not exist. * @param dir Directory to use for the returned file * @param prefix Prefix of the file name (before the uniquifying integer) * @param extension Suffix of the file name (after the uniquifying integer) */ public static File uniqueFile (File dir, String prefix, String extension) throws IOException { File f = null; int i = 0; boolean wasCreated = false; while (!wasCreated) { if (dir != null) { f = new File (dir, prefix+i+extension); } else { f = new File (prefix+i+extension); } wasCreated = f.createNewFile (); i++; } return f; } /** * Writes a serialized version of obj to a given file, compressing it using gzip. * @param f File to write to * @param obj Object to serialize */ public static void writeGzippedObject (File f, Serializable obj) { try { ObjectOutputStream oos = new ObjectOutputStream (new BufferedOutputStream (new GZIPOutputStream (new FileOutputStream(f)))); oos.writeObject(obj); oos.close(); } catch (IOException e) { System.err.println("Exception writing file " + f + ": " + e); } } /** * Reads a serialized object from a file that has been compressed using gzip. * You probably want to use {@link #readObject} instead, because it will automatically guess * from the extension whether the file is compressed, and call this method if necessary. * @param f Compressed file to read object from * @see #readObject */ public static Object readGzippedObject (File f) { try { ObjectInputStream ois = new ObjectInputStream (new BufferedInputStream (new GZIPInputStream (new FileInputStream(f)))); Object obj = ois.readObject(); ois.close (); return obj; } catch (IOException e) { throw new RuntimeException (e); } catch (ClassNotFoundException e) { throw new RuntimeException (e); } } } // FileUtils
5,357
29.617143
130
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/Maths.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; // Math and statistics functions public final class Maths { // From libbow, dirichlet.c // Written by Tom Minka <[email protected]> public static final double logGamma (double x) { double result, y, xnum, xden; int i; final double d1 = -5.772156649015328605195174e-1; final double p1[] = { 4.945235359296727046734888e0, 2.018112620856775083915565e2, 2.290838373831346393026739e3, 1.131967205903380828685045e4, 2.855724635671635335736389e4, 3.848496228443793359990269e4, 2.637748787624195437963534e4, 7.225813979700288197698961e3 }; final double q1[] = { 6.748212550303777196073036e1, 1.113332393857199323513008e3, 7.738757056935398733233834e3, 2.763987074403340708898585e4, 5.499310206226157329794414e4, 6.161122180066002127833352e4, 3.635127591501940507276287e4, 8.785536302431013170870835e3 }; final double d2 = 4.227843350984671393993777e-1; final double p2[] = { 4.974607845568932035012064e0, 5.424138599891070494101986e2, 1.550693864978364947665077e4, 1.847932904445632425417223e5, 1.088204769468828767498470e6, 3.338152967987029735917223e6, 5.106661678927352456275255e6, 3.074109054850539556250927e6 }; final double q2[] = { 1.830328399370592604055942e2, 7.765049321445005871323047e3, 1.331903827966074194402448e5, 1.136705821321969608938755e6, 5.267964117437946917577538e6, 1.346701454311101692290052e7, 1.782736530353274213975932e7, 9.533095591844353613395747e6 }; final double d4 = 1.791759469228055000094023e0; final double p4[] = { 1.474502166059939948905062e4, 2.426813369486704502836312e6, 1.214755574045093227939592e8, 2.663432449630976949898078e9, 2.940378956634553899906876e10, 1.702665737765398868392998e11, 4.926125793377430887588120e11, 5.606251856223951465078242e11 }; final double q4[] = { 2.690530175870899333379843e3, 6.393885654300092398984238e5, 4.135599930241388052042842e7, 1.120872109616147941376570e9, 1.488613728678813811542398e10, 1.016803586272438228077304e11, 3.417476345507377132798597e11, 4.463158187419713286462081e11 }; final double c[] = { -1.910444077728e-03, 8.4171387781295e-04, -5.952379913043012e-04, 7.93650793500350248e-04, -2.777777777777681622553e-03, 8.333333333333333331554247e-02, 5.7083835261e-03 }; final double a = 0.6796875; if((x <= 0.5) || ((x > a) && (x <= 1.5))) { if(x <= 0.5) { result = -Math.log(x); /* Test whether X < machine epsilon. */ if(x+1 == 1) { return result; } } else { result = 0; x = (x - 0.5) - 0.5; } xnum = 0; xden = 1; for(i=0;i<8;i++) { xnum = xnum * x + p1[i]; xden = xden * x + q1[i]; } result += x*(d1 + x*(xnum/xden)); } else if((x <= a) || ((x > 1.5) && (x <= 4))) { if(x <= a) { result = -Math.log(x); x = (x - 0.5) - 0.5; } else { result = 0; x -= 2; } xnum = 0; xden = 1; for(i=0;i<8;i++) { xnum = xnum * x + p2[i]; xden = xden * x + q2[i]; } result += x*(d2 + x*(xnum/xden)); } else if(x <= 12) { x -= 4; xnum = 0; xden = -1; for(i=0;i<8;i++) { xnum = xnum * x + p4[i]; xden = xden * x + q4[i]; } result = d4 + x*(xnum/xden); } /* X > 12 */ else { y = Math.log(x); result = x*(y - 1) - y*0.5 + .9189385332046727417803297; x = 1/x; y = x*x; xnum = c[6]; for(i=0;i<6;i++) { xnum = xnum * y + c[i]; } xnum *= x; result += xnum; } return result; } // This is from "Numeric Recipes in C" public static double oldLogGamma (double x) { int j; double y, tmp, ser; double [] cof = {76.18009172947146, -86.50532032941677 , 24.01409824083091, -1.231739572450155 , 0.1208650973866179e-2, -0.5395239384953e-5}; y = x; tmp = x + 5.5 - (x + 0.5) * Math.log (x + 5.5); ser = 1.000000000190015; for (j = 0; j <= 5; j++) ser += cof[j] / ++y; return Math.log (2.5066282746310005 * ser / x) - tmp; } public static double logBeta (double a, double b) { return logGamma(a)+logGamma(b)-logGamma(a+b); } public static double beta (double a, double b) { return Math.exp (logBeta(a,b)); } public static double gamma (double x) { return Math.exp (logGamma(x)); } public static double factorial (int n) { return Math.exp (logGamma(n+1)); } public static double logFactorial (int n) { return logGamma(n+1); } /** * Computes p(x;n,p) where x~B(n,p) */ // Copied as the "classic" method from Catherine Loader. // Fast and Accurate Computation of Binomial Probabilities. // 2001. (This is not the fast and accurate version.) public static double logBinom (int x, int n, double p) { return logFactorial (n) - logFactorial (x) - logFactorial (n - x) + (x*Math.log (p)) + ((n-x)*Math.log (1-p)); } /** * Vastly inefficient O(x) method to compute cdf of B(n,p) */ public static double pbinom (int x, int n, double p) { double sum = Double.NEGATIVE_INFINITY; for (int i = 0; i <= x; i++) { sum = sumLogProb (sum, logBinom (i, n, p)); } return Math.exp (sum); } public static double sigmod(double beta){ return (double)1.0/(1.0+Math.exp(-beta)); } public static double sigmod_rev(double sig){ return (double)Math.log(sig/(1-sig)); } public static double logit (double p) { return Math.log (p / (1 - p)); } // Combination? public static double numCombinations (int n, int r) { return Math.exp (logFactorial(n)-logFactorial(r)-logFactorial(n-r)); } // Permutation? public static double numPermutations (int n, int r) { return Math.exp (logFactorial(n)-logFactorial(r)); } public static double cosh (double a) { if (a < 0) return 0.5 * (Math.exp(-a) + Math.exp(a)); else return 0.5 * (Math.exp(a) + Math.exp(-a)); } public static double tanh (double a) { return (Math.exp(a) - Math.exp(-a)) / (Math.exp(a) + Math.exp(-a)); } /** * Numbers that are closer than this are considered equal * by almostEquals. */ public static double EPSILON = 0.000001; public static boolean almostEquals (double d1, double d2) { return almostEquals (d1, d2, EPSILON); } public static boolean almostEquals (double d1, double d2, double epsilon) { return Math.abs (d1 - d2) < epsilon; } public static boolean almostEquals (double[] d1, double[] d2, double eps) { for (int i = 0; i < d1.length; i++) { double v1 = d1[i]; double v2 = d2[i]; if (!almostEquals (v1, v2, eps)) return false; } return true; } // gsc /** * Checks if <tt>min &lt;= value &lt;= max</tt>. */ public static boolean checkWithinRange(double value, double min, double max) { return (value > min || almostEquals(value, min, EPSILON)) && (value < max || almostEquals(value, max, EPSILON)); } public static final double log2 = Math.log(2); // gsc /** * Returns the KL divergence, K(p1 || p2). * * The log is w.r.t. base 2. <p> * * *Note*: If any value in <tt>p2</tt> is <tt>0.0</tt> then the KL-divergence * is <tt>infinite</tt>. * */ public static double klDivergence(double[] p1, double[] p2) { assert (p1.length == p2.length); double klDiv = 0.0; for (int i = 0; i < p1.length; ++i) { if (p1[i] == 0) { continue; } if (p2[i] == 0) { return Double.POSITIVE_INFINITY; } klDiv += p1[i] * Math.log(p1[i] / p2[i]); } return klDiv / log2; // moved this division out of the loop -DM } // gsc /** * Returns the Jensen-Shannon divergence. */ public static double jensenShannonDivergence(double[] p1, double[] p2) { assert(p1.length == p2.length); double[] average = new double[p1.length]; for (int i = 0; i < p1.length; ++i) { average[i] += (p1[i] + p2[i])/2; } return (klDivergence(p1, average) + klDivergence(p2, average))/2; } /** * Returns the sum of two doubles expressed in log space, * that is, * <pre> * sumLogProb = log (e^a + e^b) * = log e^a(1 + e^(b-a)) * = a + log (1 + e^(b-a)) * </pre> * * By exponentiating <tt>b-a</tt>, we obtain better numerical precision than * we would if we calculated <tt>e^a</tt> or <tt>e^b</tt> directly. * <P> * Note: This function is just like * {@link cc.mallet.fst.Transducer#sumNegLogProb sumNegLogProb} * in <TT>Transducer</TT>, * except that the logs aren't negated. */ public static double sumLogProb (double a, double b) { if (a == Double.NEGATIVE_INFINITY) return b; else if (b == Double.NEGATIVE_INFINITY) return a; else if (b < a) return a + Math.log (1 + Math.exp(b-a)); else return b + Math.log (1 + Math.exp(a-b)); } // Below from Stanford NLP package, SloppyMath.java private static final double LOGTOLERANCE = 30.0; /** * Sums an array of numbers log(x1)...log(xn). This saves some of * the unnecessary calls to Math.log in the two-argument version. * <p> * Note that this implementation IGNORES elements of the input * array that are more than LOGTOLERANCE (currently 30.0) less * than the maximum element. * <p> * Cursory testing makes me wonder if this is actually much faster than * repeated use of the 2-argument version, however -cas. * @param vals An array log(x1), log(x2), ..., log(xn) * @return log(x1+x2+...+xn) */ public static double sumLogProb (double[] vals) { double max = Double.NEGATIVE_INFINITY; int len = vals.length; int maxidx = 0; for (int i = 0; i < len; i++) { if (vals[i] > max) { max = vals[i]; maxidx = i; } } boolean anyAdded = false; double intermediate = 0.0; double cutoff = max - LOGTOLERANCE; for (int i = 0; i < maxidx; i++) { if (vals[i] >= cutoff) { anyAdded = true; intermediate += Math.exp(vals[i] - max); } } for (int i = maxidx + 1; i < len; i++) { if (vals[i] >= cutoff) { anyAdded = true; intermediate += Math.exp(vals[i] - max); } } if (anyAdded) { return max + Math.log(1.0 + intermediate); } else { return max; } } /** * Returns the difference of two doubles expressed in log space, * that is, * <pre> * sumLogProb = log (e^a - e^b) * = log e^a(1 - e^(b-a)) * = a + log (1 - e^(b-a)) * </pre> * * By exponentiating <tt>b-a</tt>, we obtain better numerical precision than * we would if we calculated <tt>e^a</tt> or <tt>e^b</tt> directly. * <p> * Returns <tt>NaN</tt> if b > a (so that log(e^a - e^b) is undefined). */ public static double subtractLogProb (double a, double b) { if (b == Double.NEGATIVE_INFINITY) return a; else return a + Math.log (1 - Math.exp(b-a)); } public static double getEntropy(double[] dist) { double entropy = 0; for (int i = 0; i < dist.length; i++) { if (dist[i] != 0) { entropy -= dist[i] * Math.log(dist[i]); } } return entropy; } }
11,716
26.699764
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/UriUtils.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.util.regex.Pattern; import java.util.regex.Matcher; import java.net.URI; import java.io.File; public class UriUtils { // This regex induced from http://www.ietf.org/rfc/rfc2396.txt static Pattern schemeRegex = Pattern.compile ("\\p{Alpha}[\\p{Alnum}\\+\\.-]*:"); private static String defaultFileSchema (String string) { // If "string" does not have a URI scheme (e.g. "file:" or "http:") // then assume a default of "file:" and add it. Matcher matcher = schemeRegex.matcher (string); if (!matcher.lookingAt()) string = "file:" + string; return string; } public static URI objectToUri (Object obj) { try { if (obj instanceof String) // If the string has no schema, assume that it is the "file:" schema return new URI (defaultFileSchema((String)obj)); if (obj instanceof File) return new URI ("file:" + ((File)obj).getAbsolutePath()); else return new URI (obj.toString()); } catch (Exception e) { throw new IllegalArgumentException ("UriUtils.objectToUri: " + e.toString()); } } /** Convert a string-representation of a URI into a string that could be a filename. Do this by substituting '/' for '+', and '++' for '+'. For example, "http://www.cs.umass.edu/faculty" becomes "http:++www.cs.umass.edu+faculty". */ public static String uriStringToFilename (String uri) { StringBuffer sb = new StringBuffer(); char c; for (int i = 0; i < uri.length(); i++) { c = uri.charAt(i); if (c == File.pathSeparatorChar) sb.append ('+'); else if (c == '+') sb.append ("++"); else sb.append (c); } return sb.toString(); } public static String filenameToUriString (String filename) { StringBuffer sb = new StringBuffer(); char c; for (int i = 0; i < filename.length(); i++) { c = filename.charAt(i); if (c == '+') { if (i < filename.length() && filename.charAt(i+1) == '+') { sb.append ('+'); i++; } else sb.append (File.pathSeparator); } else sb.append (c); } return sb.toString(); } }
2,593
26.595745
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/MVNormal.java
package cc.mallet.util; import java.util.Arrays; import java.text.NumberFormat; import cc.mallet.util.Randoms; import cc.mallet.types.*; /** Tools for working with multivariate normal distributions */ public class MVNormal { /** Simple Cholesky decomposition, with no checks on squareness, * symmetricality, or positive definiteness. This follows the * implementation from JAMA fairly closely. * <p> * Returns L such that LL' = A and L is lower-triangular. */ public static double[] cholesky(double[] input, int numRows) { // Initialize the result. Note that java sets all elements to 0. double[] result = new double[ input.length ]; double sumRowSquared = 0.0; double dotProduct = 0.0; // For each off-diagonal cell l_{jk} in the result, // we need an index into the jth row and the kth row. // These are therefore both really row offsets, but one // corresponds to the beginning of the (row)th row // and the other to the beginning of the (column)th row. int rowOffset = 0; int colOffset = 0; for (int row = 0; row < numRows; row++) { sumRowSquared = 0.0; rowOffset = row * numRows; for (int col = 0; col < row; col++) { dotProduct = 0.0; colOffset = col * numRows; for (int i = 0; i < col; i++) { dotProduct += result[ rowOffset + i ] * result[ colOffset + i ]; } result[ rowOffset + col ] = (input[ rowOffset + col ] - dotProduct) / result[ colOffset + col ]; sumRowSquared += result[rowOffset + col] * result[rowOffset + col]; } // Now the diagonal element result[ rowOffset + row ] = Math.sqrt(input[ rowOffset + row ] - sumRowSquared); } return result; } public static double[] bandCholesky(double[] input, int numRows) { // Initialize the result. Note that java sets all elements to 0. double[] result = new double[ input.length ]; double sumRowSquared = 0.0; double dotProduct = 0.0; // For each off-diagonal cell l_{jk} in the result, // we need an index into the jth row and the kth row. // These are therefore both really row offsets, but one // corresponds to the beginning of the (row)th row // and the other to the beginning of the (column)th row. int rowOffset = 0; int colOffset = 0; int firstNonZero; for (int row = 0; row < numRows; row++) { sumRowSquared = 0.0; rowOffset = row * numRows; firstNonZero = row; for (int col = 0; col < row; col++) { if (firstNonZero == row) { if (input[ rowOffset + col ] == 0) { continue; } else { firstNonZero = col; } } dotProduct = 0.0; colOffset = col * numRows; for (int i = firstNonZero; i < col; i++) { dotProduct += result[ rowOffset + i ] * result[ colOffset + i ]; } result[ rowOffset + col ] = (input[ rowOffset + col ] - dotProduct) / result[ colOffset + col ]; sumRowSquared += result[rowOffset + col] * result[rowOffset + col]; } // Now the diagonal element result[ rowOffset + row ] = Math.sqrt(input[ rowOffset + row ] - sumRowSquared); } return result; } /** For testing band cholesky factorization */ public static double[] bandMatrixRoot (int dim, int bandwidth) { double[] result = new double[dim * dim]; for (int row = 0; row < dim; row++) { int rowOffset = row * dim; for (int col = Math.max(0, (row - bandwidth + 1)); col <= row; col++) { result[rowOffset + col] = 1.0; } } return result; } /** Sample a multivariate normal from a precision matrix * (ie inverse covariance matrix) */ public static double[] nextMVNormal(double[] mean, double[] precision, Randoms random) { return nextMVNormalWithCholesky(mean, cholesky(precision, mean.length), random); } public static double[] nextMVNormalWithCholesky(double[] mean, double[] precisionLowerTriangular, Randoms random) { int n = mean.length; // Initialize vector z to standard normals // [NB: using the same array for z and x] double[] result = new double[ n ]; for (int i = 0; i < n; i++) { result[i] = random.nextGaussian(); } // Now solve trans(L) x = z using back substitution double innerProduct; for (int i = n-1; i >= 0; i--) { innerProduct = 0.0; for (int j = i+1; j < n; j++) { // the cholesky decomp got us the precisionLowerTriangular triangular // matrix, but we really want the transpose. innerProduct += result[j] * precisionLowerTriangular[ (n * j) + i ]; } result[i] = (result[i] - innerProduct) / precisionLowerTriangular[ (n * i) + i ]; } for (int i = 0; i < n; i++) { result[i] += mean[i]; } return result; } /** Sample a vector x from N(m, (LL')<sup>-1</sup>, such that * sum_i x_i = 0. */ public static double[] nextZeroSumMVNormalWithCholesky(double[] mean, double[] precisionLowerTriangular, Randoms random) { int n = mean.length; double[] result = nextMVNormalWithCholesky(mean, precisionLowerTriangular, random); double sum = 0.0; for (int i = 0; i < n; i++) { sum += result[i]; } // get the sum of each row of the inverse precision matrix // by solving the two triangular systems L(L'x) = Ly = 1, L'x = y. double[] ones = new double[n]; Arrays.fill(ones, 1.0); double[] firstSolution = solveWithForwardSubstitution(ones, precisionLowerTriangular); double[] rowSums = solveWithBackSubstitution(firstSolution, precisionLowerTriangular); double sumOfRowSums = 0.0; for (int i = 0; i < n; i++) { sumOfRowSums += rowSums[i]; } double inverseSumOfRowSums = 1.0 / sumOfRowSums; for (int i = 0; i < n; i++) { result[i] -= inverseSumOfRowSums * rowSums[i] * sum; } return result; } public static double[][] nextMVNormal(int n, double[] mean, double[] precision, Randoms random) { double[][] result = new double[n][]; for (int i=0; i < n; i++) { result[i] = nextMVNormal(mean, precision, random); } return result; } public static FeatureVector nextFeatureVector(Alphabet alphabet, double[] mean, double[] precision, Randoms random) { return new FeatureVector(alphabet, nextMVNormal(mean, precision, random)); } /** * @param priorMean A vector of mean values * @param priorPrecisionDiagonal A vector representing a diagonal prior precision matrix * @param precision A precision matrix */ public static double[] nextMVNormalPosterior(double[] priorMean, double[] priorPrecisionDiagonal, double[] precision, double[] observedMean, int observations, Randoms random) { int dimension = priorMean.length; // Q_0 mu_0 + n Q y_bar double[] linearCombination = new double[dimension]; for (int i=0; i<dimension; i++) { linearCombination[i] = priorMean[i] * priorPrecisionDiagonal[i]; double innerProduct = 0.0; for (int j = 0; j < dimension; j++) { innerProduct += precision[ (dimension * i) + j ] * observedMean[j]; } linearCombination[i] += observations * innerProduct; } // Q_0 + n Q double[] posteriorPrecision = new double[precision.length]; for (int row = 0; row < dimension; row++) { for (int col = 0; col < dimension; col++) { posteriorPrecision[ (dimension * row) + col ] = observations * precision[ (dimension * row) + col ]; if (row == col) { posteriorPrecision[ (dimension * row) + col ] += priorPrecisionDiagonal[row]; } } } double[] inversePosteriorPrecision = invertSPD(posteriorPrecision, dimension); double[] posteriorMean = new double[dimension]; for (int row = 0; row < dimension; row++) { double innerProduct = 0.0; for (int col = 0; col < dimension; col++) { innerProduct += inversePosteriorPrecision[ (dimension * row) + col ] * linearCombination[ col ]; } posteriorMean[row] = innerProduct; } return nextMVNormal(posteriorMean, posteriorPrecision, random); } /** * This method returns x such that L'x = b. * Note the transpose: this method assumes that * the input matrix is LOWER triangular, even though * back substitution operates on UPPER triangular matrices. */ public static double[] solveWithBackSubstitution(double[] b, double[] lowerTriangular) { double innerProduct; int n = b.length; double[] result = new double[n]; for (int i = n-1; i >= 0; i--) { innerProduct = 0.0; for (int j = i+1; j < n; j++) { // Assume we're dealing with a single lower triangular // matrix from a cholesky decomposition, so index into // it as if it is the transpose. innerProduct += result[j] * lowerTriangular[ (n * j) + i ]; } result[i] = (b[i] - innerProduct) / lowerTriangular[ (n * i) + i ]; } return result; } /** * This method returns x such that Lx = b * where L is lower triangular */ public static double[] solveWithForwardSubstitution(double[] b, double[] lowerTriangular) { double innerProduct; int n = b.length; double[] result = new double[n]; for (int i = 0; i < n; i++) { innerProduct = 0.0; for (int j = 0; j < i; j++) { innerProduct += result[j] * lowerTriangular[ (n * i) + j ]; } result[i] = (b[i] - innerProduct) / lowerTriangular[ (n * i) + i ]; } return result; } /** * This method returns the (lower-triangular) inverse of a lower triangular * matrix. */ public static double[] invertLowerTriangular(double[] inputMatrix, int dimension) { double[] outputMatrix = new double[inputMatrix.length]; double x; for (int row = 0; row < dimension; row++) { for (int col = 0; col <= row; col++) { // Off-diagonal elements are the negative inner product // (up to the row index) of the row from input and the col // from the output, divided by the diagonal from the input. if (col == row) { // Diagonal elements are the same, but add 1 to the numerator x = 1.0; } else { x = 0.0; } for (int i = col; i < row; i++) { x -= inputMatrix[ (dimension * row) + i ] * outputMatrix[ (dimension * i) + col ]; } outputMatrix[ (dimension * row) + col ] = x / inputMatrix[ (dimension * row) + row ]; } } return outputMatrix; } /** * Returns L'L for lower triangular matrix L. */ public static double[] lowerTriangularCrossproduct(double[] inputMatrix, int dimension) { double[] outputMatrix = new double[inputMatrix.length]; double innerProduct; for (int row = 0; row < dimension; row++) { for (int col = row; col < dimension; col++) { innerProduct = 0.0; for (int i = col; i < dimension; i++) { innerProduct += inputMatrix[ row + (dimension * i) ] * inputMatrix[ col + (dimension * i) ]; } outputMatrix[ (dimension * row) + col ] = innerProduct; outputMatrix[ row + (dimension * col) ] = innerProduct; } } return outputMatrix; } /** * Returns (lower-triangular) X = AB for square lower-triangular matrices A and B */ public static double[] lowerTriangularProduct(double[] leftMatrix, double[] rightMatrix, int dimension) { double[] outputMatrix = new double[leftMatrix.length]; double innerProduct; for (int row = 0; row < dimension; row++) { for (int col = 0; col <= row; col++) { innerProduct = 0.0; for (int i = col; i <= row; i++) { innerProduct += leftMatrix[ (dimension * row) + i ] * rightMatrix[ (dimension * i) + col ]; } outputMatrix[ (dimension * row) + col ] = innerProduct; } } return outputMatrix; } public static double[] invertSPD(double[] inputMatrix, int dimension) { return lowerTriangularCrossproduct(invertLowerTriangular(bandCholesky(inputMatrix, dimension), dimension), dimension); } /** * A Wishart random variate, based on R code by Bill Venables. * * @param sqrtScaleMatrix The lower-triangular matrix square root of the scale matrix. * To draw from the posterior of a precision (ie inverse covariance) matrix, * this should be chol( S^{-1} ), where S is the scatter matrix X'X of * columns of MV normal observations X. * @param dimension The size of the matrix * @param degreesOfFreedom The degree of freedom for the Wishart. Should be greater than dimension. For * a posterior distribution, this is the number of observations + the df of the prior. */ public static double[] nextWishart(double[] sqrtScaleMatrix, int dimension, int degreesOfFreedom, Randoms random) { double[] sample = new double[sqrtScaleMatrix.length]; for (int row = 0; row < dimension; row++) { for (int col = 0; col < row; col++) { sample[ (row * dimension) + col ] = random.nextGaussian(0, 1); } sample[ (row * dimension) + row ] = Math.sqrt(random.nextChiSq(degreesOfFreedom)); } //System.out.println(doubleArrayToString(sample, dimension)); //System.out.println(doubleArrayToString(sqrtScaleMatrix, dimension)); //System.out.println(doubleArrayToString(lowerTriangularProduct(sample, sqrtScaleMatrix, dimension), dimension)); System.out.println(diagonalToString(sample, dimension)); System.out.println(diagonalToString(sqrtScaleMatrix, dimension)); System.out.println(diagonalToString(lowerTriangularProduct(sample, sqrtScaleMatrix, dimension), dimension)); return lowerTriangularCrossproduct(lowerTriangularProduct(sample, sqrtScaleMatrix, dimension), dimension); } public static double[] nextWishartPosterior(double[] scatterMatrix, int observations, double[] priorPrecisionDiagonal, int priorDF, int dimension, Randoms random) { double[] scatterPlusPrior = new double[scatterMatrix.length]; System.arraycopy(scatterMatrix, 0, scatterPlusPrior, 0, scatterMatrix.length); for (int i=0; i < dimension; i++) { scatterPlusPrior[ (dimension * i) + i ] += 1.0 / priorPrecisionDiagonal[i]; } System.out.println(" inverted scatter plus prior"); System.out.println(diagonalToString(invertSPD(scatterPlusPrior, dimension), dimension)); System.out.println(" chol inverted scatter plus prior"); System.out.println(diagonalToString(cholesky(invertSPD(scatterPlusPrior, dimension), dimension), dimension)); double[] sqrtScaleMatrix = cholesky(invertSPD(scatterPlusPrior, dimension), dimension); return nextWishart(sqrtScaleMatrix, dimension, observations + priorDF, random); } /** Create a string representation of a square matrix in one-dimensional array format */ public static String doubleArrayToString(double[] matrix, int dimension) { NumberFormat formatter = NumberFormat.getInstance(); formatter.setMaximumFractionDigits(10); StringBuffer output = new StringBuffer(); for (int row = 0; row < dimension; row++) { for (int col = 0; col < dimension; col++) { output.append(formatter.format(matrix[ (dimension * row) + col ])); output.append("\t"); } output.append("\n"); } return output.toString(); } public static String diagonalToString(double[] matrix, int dimension) { NumberFormat formatter = NumberFormat.getInstance(); formatter.setMaximumFractionDigits(4); StringBuffer output = new StringBuffer(); for (int row = 0; row < dimension; row++) { output.append(formatter.format(matrix[ (dimension * row) + row ])); output.append(" "); } return output.toString(); } public static double[] getScatterMatrix(double[][] observationMatrix) { int observations = observationMatrix.length; int dimension = observationMatrix[0].length; double[] outputMatrix = new double[dimension * dimension]; double[] means = new double[dimension]; // collect the sample means for (int i = 0; i < observations; i++) { for (int d = 0; d < dimension; d++) { means[d] += observationMatrix[i][d]; } } for (int d = 0; d < dimension; d++) { means[d] /= observations; } // now the sample covariance (times n) for (int i = 0; i < observations; i++) { for (int d1 = 0; d1 < dimension; d1++) { for (int d2 = 0; d2 < dimension; d2++) { outputMatrix[ (dimension * d1) + d2 ] += (observationMatrix[i][d1] - means[d1]) * (observationMatrix[i][d2] - means[d2]); } } } return outputMatrix; } public static void testCholesky() { int observations = 1000; double[] mean = new double[20]; double[] precisionMatrix = new double[ 20 * 20 ]; for (int i=0; i<20; i++) { precisionMatrix[ (20 * i) + i ] = 1.0; } Randoms random = new Randoms(); double[] scatterMatrix = getScatterMatrix(nextMVNormal(observations, mean, precisionMatrix, random)); double[] priorPrecision = new double[20]; Arrays.fill(priorPrecision, 1.0); nextWishartPosterior(scatterMatrix, observations, priorPrecision, 21, 20, random); } public static void main (String[] args) { //double[] spd = { 19.133825, -1.180869, 6.403880, // -1.180869, 8.895968, 1.280748, // 6.403880, 1.280748, 9.155951 }; double[] spd = {3.0, 0.0, -1.0, 0.0, 3.0, 0.0, -1.0, 0.0, 3.0}; Randoms random = new Randoms(); double[] mean = { 1.0, 1.0, 1.0 }; double[] lower = cholesky(spd, 3); for (int iter = 0; iter < 10; iter++) { double[] sample = nextMVNormalWithCholesky(mean, lower, random); for (int i=0; i<sample.length; i++) { System.out.print(sample[i] + "\t"); } System.out.println(); } for (int iter = 0; iter < 10; iter++) { double[] sample = nextZeroSumMVNormalWithCholesky(mean, lower, random); for (int i=0; i<sample.length; i++) { System.out.print(sample[i] + "\t"); } System.out.println(); } /* int dim = 100; double[] bandLower = bandMatrixRoot(dim, 3); System.out.println(doubleArrayToString(bandLower, dim)); double[] bandMatrix = lowerTriangularCrossproduct(bandLower, dim); System.out.println(doubleArrayToString(bandMatrix, dim)); long startTime; startTime = System.currentTimeMillis(); for (int i=0; i<100000; i++) { bandCholesky(bandMatrix, dim); } System.out.println(System.currentTimeMillis() - startTime); startTime = System.currentTimeMillis(); for (int i=0; i<100000; i++) { cholesky(bandMatrix, dim); } System.out.println(System.currentTimeMillis() - startTime); */ /* double[] l = {2.87527, 0.0, 0.0, -2.4168, 1.28, 0.0, -0.585168, -2.792234, 2.769609}; double[] spd = { 19.133825, -1.180869, 6.403880, -1.180869, 8.895968, 1.280748, 6.403880, 1.280748, 9.155951 }; double[] scatter = { 103.59761, -16.370939, 12.694755, -16.37094, 106.117048, 4.079818, 12.69476, 4.079818, 94.065152 }; double[] priorDiagonal = { 1.0, 1.0, 1.0 }; testCholesky(); */ //System.out.println(doubleArrayToString(nextWishartPosterior(scatter, 100, priorDiagonal, 10, 3, new Randoms()), 3)); /* long startTime = System.currentTimeMillis(); for (int i=0; i<10000; i++) { invertSPD(spd, 3); } System.out.println(System.currentTimeMillis() - startTime); */ //System.out.println(doubleArrayToString(invertSPD(spd, 3), 3)); //System.out.println(doubleArrayToString(nextWishart(l, 3, 25, new Randoms()), 3)); /* double[] precisionMatrix = {0.98, -1.0, 0.0, -1.0, 2.13, -1.0, 0.0, -1.0, 1.01}; double[] mean = new double[3]; Randoms random = new Randoms(); for (int i=0; i<10; i++) { double[] variate = nextMVNormal(mean, precisionMatrix, random); for (int j=0; j<variate.length; j++) { System.out.print(variate[j] + "\t"); } System.out.println(); } */ } }
19,897
27.837681
120
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/VectorStats.java
package cc.mallet.util; /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** * Class of static methods for calculating statistics of a SparseVector sample * packaged in an InstanceList. * * @author Jerod Weinman <A HREF="mailto:[email protected]">[email protected]</A> */ import java.util.Arrays; import java.util.Iterator; import cc.mallet.types.*; import gnu.trove.TIntHashSet; public class VectorStats { /** * Returns a <CODE>SparseVector</CODE> whose entries (taken from the union of * those in the instances) are the expected values of those in the * <CODE>InstanceList</CODE>. This implies the returned vector will not have * binary values. */ public static SparseVector mean(InstanceList instances) { if (instances == null || instances.size() == 0) return null; Iterator<Instance> instanceItr = instances.iterator(); SparseVector v; Instance instance; int indices[]; int maxSparseIndex = -1; int maxDenseIndex = -1; // First, we find the union of all the indices used in the instances TIntHashSet hIndices = new TIntHashSet(instances.getDataAlphabet().size()); while (instanceItr.hasNext()) { instance = (Instance) instanceItr.next(); v = (SparseVector) (instance.getData()); indices = v.getIndices(); if (indices != null) { hIndices.addAll(indices); if (indices[indices.length - 1] > maxSparseIndex) maxSparseIndex = indices[indices.length - 1]; } else // dense if (v.numLocations() > maxDenseIndex) maxDenseIndex = v.numLocations() - 1; } if (maxDenseIndex > -1) // dense vectors were present { if (maxSparseIndex > maxDenseIndex) // sparse vectors were present and they had greater indices than // the dense vectors { // therefore, we create sparse vectors and // add all the dense indices for (int i = 0; i <= maxDenseIndex; i++) hIndices.add(i); } else // sparse indices may have been present, but we don't care // since they never had indices that exceeded those of the // dense vectors { return mean(instances, maxDenseIndex + 1); } } // reaching this statement implies we can create a sparse vector return mean(instances, hIndices.toArray()); } /** * Returns a <CODE>SparseVector</CODE> whose entries (dense with the given * number of indices) are the expected values of those in the * <CODE>InstanceList</CODE>. This implies the returned vector will not have * binary values. */ public static SparseVector mean(InstanceList instances, int numIndices) { SparseVector mv = new SparseVector(new double[numIndices], false); return mean(instances, mv); } /** * Returns a <CODE>SparseVector</CODE> whose entries (the given indices) are * the expected values of those in the <CODE>InstanceList</CODE>. This implies * the returned vector will not have binary values. */ public static SparseVector mean(InstanceList instances, int[] indices) { // Create the mean vector with the indices having all zeros, // nothing copied, sorted, and no checks for duplicates. // [email protected] // it is faster to sort indices first Arrays.sort(indices); SparseVector mv = new SparseVector(indices, new double[indices.length], // [email protected] // it is faster to sort indices first (above) // false, true, false); false, false, false); return mean(instances, mv); } private static SparseVector mean(InstanceList instances, SparseVector meanVector) { if (instances == null || instances.size() == 0) return null; Instance instance; SparseVector v; Iterator<Instance> instanceItr = instances.iterator(); double factor = 1.0 / (double) instances.size(); while (instanceItr.hasNext()) { instance = (Instance) instanceItr.next(); v = (SparseVector) (instance.getData()); meanVector.plusEqualsSparse(v, factor); } return meanVector; } /** * Returns a <CODE>SparseVector</CODE> whose entries (taken from the union of * those in the instances) are the variance of those in the * <CODE>InstanceList</CODE>. This implies the returned vector will not have * binary values. * * @param unbiased * Normalizes by N-1 when true, and by N otherwise. */ public static SparseVector variance(InstanceList instances, boolean unbiased) { return variance(instances, mean(instances), unbiased); } /** * Returns a <CODE>SparseVector</CODE> whose entries (taken from the mean * argument) are the variance of those in the <CODE>InstanceList</CODE>. This * implies the returned vector will not have binary values. * * @param unbiased * Normalizes by N-1 when true, and by N otherwise. */ public static SparseVector variance(InstanceList instances, SparseVector mean, boolean unbiased) { if (instances == null || instances.size() == 0) return null; double factor = 1.0 / (double) (instances.size() - (unbiased ? 1.0 : 0.0)); System.out.println("factor = " + factor); SparseVector v; // var = (x^2 - n*mu^2)/(n-1) SparseVector vv = (SparseVector) mean.cloneMatrix(); vv.timesEqualsSparse(vv, -(double) instances.size() * factor); Iterator<Instance> instanceItr = instances.iterator(); Instance instance; while (instanceItr.hasNext()) { instance = (Instance) instanceItr.next(); v = (SparseVector) ((SparseVector) (instance.getData())).cloneMatrix(); v.timesEqualsSparse(v); vv.plusEqualsSparse(v, factor); } System.out.println("Var:\n" + vv); return vv; } /** Returns unbiased variance */ public static SparseVector variance(InstanceList instances) { return variance(instances, true); } /** Returns unbiased variance of instances having the given mean. */ public static SparseVector variance(InstanceList instances, SparseVector mean) { return variance(instances, mean, true); } /** * Square root of variance. * * @param mean * Mean of the given instances. * @param unbiased * Normalizes variance by N-1 when true, and by N otherwise. * @see variance */ public static SparseVector stddev(InstanceList instances, SparseVector mean, boolean unbiased) { if (instances.size() == 0) return null; SparseVector sv = variance(instances, mean, unbiased); int dim = sv.numLocations(); double val; for (int i = 0; i < dim; i++) { val = sv.valueAtLocation(i); sv.setValueAtLocation(i, Math.sqrt(val)); } return sv; } /** Square root of unbiased variance. */ public static SparseVector stddev(InstanceList instances) { return stddev(instances, true); } /** * Square root of variance. * * @param unbiased * Normalizes variance by N-1 when true, and by N otherwise. * @see variance */ public static SparseVector stddev(InstanceList instances, boolean unbiased) { return stddev(instances, mean(instances), unbiased); } /** Square root of unbiased variance of instances having the given mean */ public static SparseVector stddev(InstanceList instances, SparseVector mean) { return stddev(instances, mean, true); } }
7,442
27.193182
88
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/Lexer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.util.Iterator; public interface Lexer extends Iterator { public int getStartOffset (); public int getEndOffset (); public String getTokenString (); // Iterator interface methods public boolean hasNext (); // Returns token text as a String public Object next (); public void remove (); }
864
21.763158
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/MalletLogger.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.util.logging.*; import java.io.*; public class MalletLogger extends Logger { // Initialize the java.util.logging.config properties to the MALLET default config file // in cc.mallet.util.resources.logging.properties //Create an array that allows us to reference the java logging levels by a simple integer. static public Level[] LoggingLevels = {Level.OFF, Level.SEVERE, Level.WARNING, Level.INFO, Level.CONFIG, Level.FINE, Level.FINER, Level.FINEST, Level.ALL}; static { if (System.getProperty("java.util.logging.config.file") == null && System.getProperty("java.util.logging.config.class") == null) { // TODO What is going on here? This is causing an error //System.setProperty("java.util.logging.config.class", "cc.mallet.util.Logger.DefaultConfigurator"); try { InputStream s = MalletLogger.class.getResourceAsStream ("resources/logging.properties"); if (s == null) throw new IOException (); LogManager.getLogManager().readConfiguration(s); Logger.global.config ("Set java.util.logging properties from "+ MalletLogger.class.getPackage().getName() + "/resources/logging.properties"); } catch (IOException e) { System.err.println ("Couldn't open "+MalletLogger.class.getName()+" resources/logging.properties file.\n" +" Perhaps the 'resources' directories weren't copied into the 'class' directory.\n" +" Continuing."); } } } protected MalletLogger (String name, String resourceBundleName) { super (name, resourceBundleName); } public static Logger getLogger (String name) { return Logger.getLogger (name); } /** Convenience method for finding the root logger. */ public Logger getRootLogger() { Logger rootLogger = this; while (rootLogger.getParent() != null) { rootLogger = rootLogger.getParent(); } return rootLogger; } }
2,499
32.783784
109
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/PrintUtilities.java
package cc.mallet.util; import java.awt.*; import javax.swing.*; import java.awt.print.*; /** A simple utility class that lets you very simply print * an arbitrary component. Just pass the component to the * PrintUtilities.printComponent. The component you want to * print doesn't need a print method and doesn't have to * implement any interface or do anything special at all. * <P> * If you are going to be printing many times, it is marginally more * efficient to first do the following: * <PRE> * PrintUtilities printHelper = new PrintUtilities(theComponent); * </PRE> * then later do printHelper.print(). But this is a very tiny * difference, so in most cases just do the simpler * PrintUtilities.printComponent(componentToBePrinted). * * 7/99 Marty Hall, http://www.apl.jhu.edu/~hall/java/ * May be freely used or adapted. */ public class PrintUtilities implements Printable { private Component componentToBePrinted; public static void printComponent(Component c) { new PrintUtilities(c).print(); } public PrintUtilities(Component componentToBePrinted) { this.componentToBePrinted = componentToBePrinted; } public void print() { PrinterJob printJob = PrinterJob.getPrinterJob(); printJob.setPrintable(this); if (printJob.printDialog()) try { printJob.print(); } catch(PrinterException pe) { System.out.println("Error printing: " + pe); } } public int print(Graphics g, PageFormat pageFormat, int pageIndex) { if (pageIndex > 0) { return(NO_SUCH_PAGE); } else { Graphics2D g2d = (Graphics2D)g; g2d.translate(pageFormat.getImageableX(), pageFormat.getImageableY()); disableDoubleBuffering(componentToBePrinted); componentToBePrinted.paint(g2d); enableDoubleBuffering(componentToBePrinted); return(PAGE_EXISTS); } } /** The speed and quality of printing suffers dramatically if * any of the containers have double buffering turned on. * So this turns if off globally. * @see enableDoubleBuffering */ public static void disableDoubleBuffering(Component c) { RepaintManager currentManager = RepaintManager.currentManager(c); currentManager.setDoubleBufferingEnabled(false); } /** Re-enables double buffering globally. */ public static void enableDoubleBuffering(Component c) { RepaintManager currentManager = RepaintManager.currentManager(c); currentManager.setDoubleBufferingEnabled(true); } }
2,516
31.269231
76
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/Timing.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.util; /** * A class for timing things. * Originally inspired by the Timing class in the Stanford NLP cade, * but completely rewritten. * <p/> * Created: Dec 30, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: Timing.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $ */ public class Timing { private long objCreationTime; private long startTime; public Timing () { startTime = System.currentTimeMillis (); objCreationTime = startTime; } /** * Print to System.out how much time has passed, resetting this Timing's start time to * the current time. Time is measured from the most recent * <code>tick</code> call, or when this object was created. * * @param msg Prefix of string printed with time * @return Number of elapsed milliseconds from tick (or start) */ public long tick (String msg) { long elapsed = report (msg); startTime = System.currentTimeMillis (); return elapsed; } /** * Returns how much time as passed since Object creation, or the most recent call to tick(). * @return Number of elapsed milliseconds */ public long elapsedTime () { return System.currentTimeMillis () - startTime; } /** * Returns the number of milliseconds since this object was created. * Ignores previous calls to <tt>tick</tt>, unlike * <tt>elapsedTime</tt> and <tt>tick</tt>. */ public long totalElapsedTime () { return System.currentTimeMillis () - objCreationTime; } /** Like tick(), but doesn't reset the counter. */ public long report (String msg) { long currentTime = System.currentTimeMillis (); long elapsed = currentTime - startTime; System.out.println (msg + " time (ms) = " + (elapsed)); return elapsed; } }
2,229
29.547945
94
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/PlainLogFormatter.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** This formmater extends and replaces the SimpleFormatter provided by java. It just writes out the message with no adornments. @author David Pinto <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.util.logging.*; public class PlainLogFormatter extends SimpleFormatter { public PlainLogFormatter() { super(); } public String format (LogRecord record) { return record.getMessage()+ "\n"; } }
882
27.483871
80
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/CollectionUtils.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.util; import gnu.trove.THashSet; import gnu.trove.TObjectDoubleHashMap; import gnu.trove.TObjectDoubleProcedure; import java.util.*; /** * * * Created: Sun Jan 25 01:04:29 2004 * * @author <a href="mailto:[email protected]">Charles Sutton</a> * @version $Id: CollectionUtils.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $ */ public class CollectionUtils { private CollectionUtils() {} // No instances public static String dumpToString (Collection c, String separator) { String retval = ""; for (Iterator it = c.iterator(); it.hasNext();) { retval += String.valueOf(it.next()); retval += separator; } return retval; } public static String dumpToString (Collection c) { return dumpToString (c, " "); } public static void print (Collection c) { System.out.println (dumpToString (c)); } public static void print (Collection c, String separator) { System.out.println (dumpToString (c, separator)); } public static Collection subset (Collection c, int size, java.util.Random rand) { ArrayList list = new ArrayList (c); int realSize = (size < c.size()) ? size : c.size(); java.util.Collections.shuffle (list, rand); return list.subList (0, realSize); } public static List sortedUnion (List args1, List args2) { SortedSet set = new TreeSet (); set.addAll (args1); set.addAll (args2); List lst = new ArrayList (set.size ()); for (Iterator it = set.iterator (); it.hasNext ();) { Object o = it.next (); lst.add (o); } return lst; } /** Computes a nondestructive intersection of two collections. */ public static Collection intersection (Collection c1, Collection c2) { Set set = new THashSet (c1); set.retainAll (c2); return set; } public static Collection union (Collection c1, Collection c2) { Set set = new THashSet (c1); set.addAll (c2); return set; } /** Returns the key in map that has the greatest score */ public static Object argmax (TObjectDoubleHashMap map) { // A local class! Yes, Virginia, this is legal Java. class Accumulator implements TObjectDoubleProcedure { double bestVal = Double.NEGATIVE_INFINITY; Object bestObj = null; public boolean execute (Object a, double b) { if (b > bestVal) { bestVal = b; bestObj = a; } return true; } } Accumulator procedure = new Accumulator (); map.forEachEntry (procedure); return procedure.bestObj; } public static interface Fn { Object f (Object input); } /** Returns a new collection whose elements consist of the function fn.f applied to all of the * elements of the given collection. The returned collection will have the same class as the input * collection. */ public static Collection map (Collection c, Fn fn) { Class collectionClass = c.getClass (); Collection copy; try { copy = (Collection) collectionClass.newInstance (); } catch (InstantiationException e) { throw new RuntimeException (e); // should never happen; collections always have public default ctor } catch (IllegalAccessException e) { throw new RuntimeException (e); // should never happen; collections always have public default ctor } Iterator it = c.iterator (); while (it.hasNext ()) { copy.add (fn.f (it.next ())); } return copy; } } // Collections
3,865
26.614286
105
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/CommandOption.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.util.*; import java.io.*; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import cc.mallet.util.BshInterpreter; public abstract class CommandOption { static BshInterpreter interpreter; /** Maps a Java class to the array of CommandOption objects that are owned by it. */ static HashMap class2options = new HashMap (); Class owner; /** The name of the argument, eg "input" */ java.lang.String name; /** The display name of the argument type, eg "[TRUE|FALSE]" or "FILE" */ java.lang.String argName; /** The type of the argument, if present */ Class argType; boolean argRequired; java.lang.String shortdoc; java.lang.String longdoc; java.lang.String fullName; /** did this command option get processed, or do we just have default value */ boolean invoked = false; public CommandOption (Class owner, java.lang.String name, java.lang.String argName, Class argType, boolean argRequired, java.lang.String shortdoc, java.lang.String longdoc) { this.owner = owner; this.name = name; this.argName = argName; this.argType = argType; this.argRequired = argRequired; this.shortdoc = shortdoc; this.longdoc = longdoc; Package p = owner.getPackage(); this.fullName = (p != null ? p.toString() : "") + name; if (interpreter == null) interpreter = new BshInterpreter (); if (owner != CommandOption.class) { CommandOption.List options = (CommandOption.List) class2options.get (owner); if (options == null) { options = new CommandOption.List (""); class2options.put (owner, options); } options.add (this); } } /** @deprecated */ public CommandOption (Class owner, java.lang.String name, java.lang.String argName, Class argType, boolean argRequired, java.lang.String shortdoc) { this (owner, name, argName, argType, argRequired, shortdoc, null); } /** Give this CommandOption the opportunity to process the index'th argument in args. Return the next unprocessed index. */ public int process (java.lang.String[] args, int index) { //System.out.println (name + " processing arg " + args[index]); if (args.length == 0) return index; // System.out.println(index + ": " + args[index]); // Is there anything to process? if (index >= args.length || args[index] == null || args[index].length() < 2 || args[index].charAt(0) != '-' || args[index].charAt(1) != '-') return index; // Determine what the command name is java.lang.String optFullName = args[index].substring(2); int dotIndex = optFullName.lastIndexOf('.'); java.lang.String optName = optFullName; // Commands may have a package prefix if (dotIndex != -1) { java.lang.String optPackageName = optFullName.substring (0, dotIndex); if (owner.getPackage() != null && ! owner.getPackage().toString().endsWith(optPackageName)) return index; optName = optFullName.substring (dotIndex+1); } // Does the option name match the name of this CommandOption? if (! name.equals(optName)) return index; // We have now determined that this CommandOption is the correct one this.invoked = true; index++; if (args.length > index && (args[index].length() < 2 || (args[index].charAt(0) != '-' && args[index].charAt(1) != '-'))) { index = parseArg (args, index); } else { if (argRequired) { throw new IllegalArgumentException ("Missing argument for option " + optName); } else { // xxx This is not parallel behavior to the above parseArg(String[],int) method. parseArg (args, -index); // xxx was "" } //index++; } return index; } public static BshInterpreter getInterpreter () { return interpreter; } public static java.lang.String[] process (java.lang.Class owner, java.lang.String[] args) { CommandOption.List options = (CommandOption.List) class2options.get (owner); if (options == null) throw new IllegalArgumentException ("No CommandOptions registered for class "+owner); return options.process (args); } public static List getList (java.lang.Class owner) { CommandOption.List options = (CommandOption.List) class2options.get (owner); if (options == null) throw new IllegalArgumentException ("No CommandOptions registered for class "+owner); return options; } public static void setSummary (java.lang.Class owner, java.lang.String summary) { CommandOption.List options = (CommandOption.List) class2options.get (owner); if (options == null) throw new IllegalArgumentException ("No CommandOption.List registered for class "+owner); options.setSummary (summary); } public java.lang.String getFullName () { return fullName; } public java.lang.String getName () { return name; } public abstract java.lang.String defaultValueToString (); public abstract java.lang.String valueToString (); /** Return true is this CommandOption was matched by one of the processed arguments. */ public boolean wasInvoked () { return invoked; } /** Called after this CommandOption matches an argument. The default implementation simply calls parseArg(String), and returns index+1; unless index is negative, in which case it calls parseArg((String)null) and returns index. */ public int parseArg (java.lang.String args[], int index) { if (index < 0) { parseArg ((java.lang.String)null); return index; } else { parseArg (args[index]); return index+1; } } public void parseArg (java.lang.String arg) {} /** To be overridden by subclasses; "list" is the the CommandOption.List that called this option */ public void postParsing (CommandOption.List list) {} /** For objects that can provide CommandOption.List's (which can be merged into other lists. */ public static interface ListProviding { public CommandOption.List getCommandOptionList (); } public static void printOptionValues(Class owner) { CommandOption.List options = (CommandOption.List) class2options.get (owner); for (int i=0; i < options.size(); i++) { CommandOption option = options.getCommandOption(i); System.out.println(option.getName() + "\t=\t" + option.valueToString()); } } public static class List { ArrayList options; HashMap map; java.lang.String summary; private List (java.lang.String summary) { this.options = new ArrayList (); this.map = new HashMap (); this.summary = summary; add (new Boolean (CommandOption.class, "help", "TRUE|FALSE", false, false, "Print this command line option usage information. "+ "Give argument of TRUE for longer documentation", null) { public void postParsing(CommandOption.List list) { printUsage(value); System.exit(-1); } }); add (new Object (CommandOption.class, "prefix-code", "'JAVA CODE'", true, null, "Java code you want run before any other interpreted code. Note that the text "+ "is interpreted without modification, so unlike some other Java code options, "+ "you need to include any necessary 'new's when creating objects.", null)); add (new File (CommandOption.class, "config", "FILE", false, null, "Read command option values from a file", null) { public void postParsing(CommandOption.List list) { readFromFile(value); } } ); } public List (java.lang.String summary, CommandOption[] options) { this(summary); add(options); } public void setSummary (java.lang.String s) { this.summary = s; } public int size () { return options.size(); } public CommandOption getCommandOption (int index) { return (CommandOption) options.get(index); } public void add (CommandOption opt) { options.add (opt); map.put (opt.getFullName(), opt); } public void add (CommandOption[] opts) { for (int i = 0; i < opts.length; i++) add (opts[i]); } public void add (CommandOption.List opts) { for (int i = 0; i < opts.size(); i++) add (opts.getCommandOption(i)); } public void add (Class owner) { CommandOption.List options = (CommandOption.List) class2options.get (owner); if (options == null) throw new IllegalArgumentException ("No CommandOptions registered for class "+owner); add (options); } /** * Load configuration information from a file. If the filename ends * with ".xml", the file is interpreted as a Java XML configuration file. * Otherwise it is interpreted as a text config file (eg "key = value" on each line). * Note that text files can only use Latin 1 (en-us) characters, while * XML files can be UTF-8. */ public void readFromFile(java.io.File configurationFile) { try { Properties properties = new Properties(); if (configurationFile.getName().endsWith(".xml")) { properties.loadFromXML(new FileInputStream(configurationFile)); } else { properties.load(new FileInputStream(configurationFile)); } Enumeration keys = properties.propertyNames(); while (keys.hasMoreElements()) { java.lang.String key = (java.lang.String) keys.nextElement(); java.lang.String[] values = properties.getProperty(key).split("\\s+"); boolean foundValue = false; for (int i = 0; i < options.size(); i++) { CommandOption option = (CommandOption) options.get(i); if (option.name.equals(key)) { foundValue = true; option.parseArg(values, 0); break; } } } } catch (Exception e) { System.err.println("Unable to process configuration file: " + e.getMessage()); } } /** Parse and process command-line options in args. Return sub-array of args occurring after first non-recognized arg that doesn't begin with a dash. */ public java.lang.String[] process (java.lang.String[] args) { int index = 0; while (index < args.length) { int newIndex = index; for (int i = 0; i < options.size(); i++) { CommandOption o = (CommandOption)options.get(i); newIndex = o.process (args, index); if (newIndex != index) { o.postParsing(this); break; } } if (newIndex == index) { // All of the CommandOptions had their chance to claim the index'th option,, // but none of them did. printUsage(false); throw new IllegalArgumentException ("Unrecognized option " + index + ": " +args[index]); } index = newIndex; } return new java.lang.String[0]; } public int processOptions (java.lang.String[] args) { for (int index = 0; index < args.length;) { int newIndex = index; for (int i = 0; i < options.size(); i++) { CommandOption o = (CommandOption)options.get(i); newIndex = o.process (args, index); if (newIndex != index) { o.postParsing(this); break; } } if (newIndex == index) { if (index < args.length && args[index].length() > 1 && args[index].charAt(0) == '-' && args[index].charAt(1) == '-') { printUsage(false); throw new IllegalArgumentException ("Unrecognized option "+args[index]); } return index; } index = newIndex; } return args.length; } public void printUsage (boolean printLongDoc) { // xxx Fix this to have nicer formatting later. System.err.println (summary); for (int i = 0; i < options.size(); i++) { CommandOption o = (CommandOption) options.get(i); System.err.println ("--"+ o.name + " " + o.argName + "\n " + o.shortdoc); if (o.longdoc != null && printLongDoc) System.err.println (" "+o.longdoc); System.err.println (" Default is "+o.defaultValueToString()); } } public void logOptions (java.util.logging.Logger logger) { for (int i = 0; i < options.size(); i++) { CommandOption o = (CommandOption) options.get(i); logger.info (o.name+" = "+o.valueToString ()); } } } public static class Boolean extends CommandOption { public boolean value, defaultValue;; public Boolean (Class owner, java.lang.String name, java.lang.String argName, boolean argRequired, boolean defaultValue, java.lang.String shortdoc, java.lang.String longdoc) { super (owner, name, argName, Boolean.class, argRequired, shortdoc, longdoc); this.defaultValue = value = defaultValue; } public boolean value () { return value; } public void parseArg (java.lang.String arg) { if (arg == null || arg.equalsIgnoreCase("true") || arg.equals("1")) value = true; else if (arg.equalsIgnoreCase("false") || arg.equals("0")) value = false; else throw new IllegalArgumentException ("Boolean option should be true|false|0|1. Instead found "+arg); } public java.lang.String defaultValueToString() { return java.lang.Boolean.toString(defaultValue); } public java.lang.String valueToString () { return java.lang.Boolean.toString (value); } } public static class Integer extends CommandOption { public int value, defaultValue; public Integer (Class owner, java.lang.String name, java.lang.String argName, boolean argRequired, int defaultValue, java.lang.String shortdoc, java.lang.String longdoc) { super (owner, name, argName, Integer.class, argRequired, shortdoc, longdoc); this.defaultValue = value = defaultValue; } public int value () { return value; } public void parseArg (java.lang.String arg) { value = java.lang.Integer.parseInt(arg); } public java.lang.String defaultValueToString() { return java.lang.Integer.toString(defaultValue); } public java.lang.String valueToString () { return java.lang.Integer.toString (value); } } public static class IntegerArray extends CommandOption { public int[] value, defaultValue; public IntegerArray (Class owner, java.lang.String name, java.lang.String argName, boolean argRequired, int[] defaultValue, java.lang.String shortdoc, java.lang.String longdoc) { super (owner, name, argName, IntegerArray.class, argRequired, shortdoc, longdoc); this.defaultValue = value = defaultValue; } public int[] value () { return value; } public void parseArg (java.lang.String arg) { java.lang.String elts[] = arg.split(","); value = new int[elts.length]; for (int i = 0; i < elts.length; i++) value[i] = java.lang.Integer.parseInt(elts[i]); } public java.lang.String defaultValueToString() { StringBuffer b = new StringBuffer(); java.lang.String sep = ""; for (int i = 0; i < defaultValue.length; i++) { b.append(sep).append(java.lang.Integer.toString(defaultValue[i])); sep = ","; } return b.toString(); } public java.lang.String valueToString() { StringBuffer b = new StringBuffer(); java.lang.String sep = ""; for (int i = 0; i < value.length; i++) { b.append(sep).append(java.lang.Integer.toString(value[i])); sep = ","; } return b.toString(); } } public static class Double extends CommandOption { public double value, defaultValue; public Double (Class owner, java.lang.String name, java.lang.String argName, boolean argRequired, double defaultValue, java.lang.String shortdoc, java.lang.String longdoc) { super (owner, name, argName, Double.class, argRequired, shortdoc, longdoc); this.defaultValue = value = defaultValue; } public double value () { return value; } public void parseArg (java.lang.String arg) { value = java.lang.Double.parseDouble(arg); } public java.lang.String defaultValueToString() { return java.lang.Double.toString(defaultValue); } public java.lang.String valueToString () { return java.lang.Double.toString (value); } } public static class DoubleArray extends CommandOption { public double[] value, defaultValue; public DoubleArray (Class owner, java.lang.String name, java.lang.String argName, boolean argRequired, double[] defaultValue, java.lang.String shortdoc, java.lang.String longdoc) { super (owner, name, argName, IntegerArray.class, argRequired, shortdoc, longdoc); this.defaultValue = value = defaultValue; } public double[] value () { return value; } public void parseArg (java.lang.String arg) { java.lang.String elts[] = arg.split(","); value = new double[elts.length]; for (int i = 0; i < elts.length; i++) value[i] = java.lang.Double.parseDouble(elts[i]); } public java.lang.String defaultValueToString() { StringBuffer b = new StringBuffer(); java.lang.String sep = ""; for (int i = 0; i < defaultValue.length; i++) { b.append(sep).append(java.lang.Double.toString(defaultValue[i])); sep = ","; } return b.toString(); } public java.lang.String valueToString() { StringBuffer b = new StringBuffer(); java.lang.String sep = ""; for (int i = 0; i < value.length; i++) { b.append(sep).append(java.lang.Double.toString(value[i])); sep = ","; } return b.toString(); } } public static class String extends CommandOption { public java.lang.String value, defaultValue; public String (Class owner, java.lang.String name, java.lang.String argName, boolean argRequired, java.lang.String defaultValue, java.lang.String shortdoc, java.lang.String longdoc) { super (owner, name, argName, java.lang.String.class, argRequired, shortdoc, longdoc); this.defaultValue = value = defaultValue; } public java.lang.String value () { return value; } public void parseArg (java.lang.String arg) { value = arg; } public java.lang.String defaultValueToString() { return defaultValue; } public java.lang.String valueToString() { return value; } } public static class SpacedStrings extends CommandOption { public java.lang.String[] value, defaultValue; public SpacedStrings (Class owner, java.lang.String name, java.lang.String argName, boolean argRequired, java.lang.String[] defaultValue, java.lang.String shortdoc, java.lang.String longdoc) { super (owner, name, argName, java.lang.String.class, argRequired, shortdoc, longdoc); this.defaultValue = value = defaultValue; } public java.lang.String[] value () { return value; } public int parseArg (java.lang.String args[], int index) { int count = 0; this.value = null; while (index < args.length && (args[index].length() < 2 || (args[index].charAt(0) != '-' && args[index].charAt(1) != '-'))) { count++; java.lang.String[] oldValue = value; value = new java.lang.String[count]; if (oldValue != null) System.arraycopy (oldValue, 0, value, 0, oldValue.length); value[count-1] = args[index]; index++; } return index; } public java.lang.String defaultValueToString() { if (defaultValue == null) return "(null)"; StringBuffer sb = new StringBuffer(); for (int i = 0; i < defaultValue.length; i++) { sb.append (defaultValue[i]); if (i < defaultValue.length-1) sb.append (" "); } return sb.toString(); } public java.lang.String valueToString () { if (value == null) return "(null)"; java.lang.String val = ""; for (int i = 0; i < value.length; i++) { val += value [i] + " "; } return val; } } public static class File extends CommandOption { public java.io.File value, defaultValue; public File (Class owner, java.lang.String name, java.lang.String argName, boolean argRequired, java.io.File defaultValue, java.lang.String shortdoc, java.lang.String longdoc) { super (owner, name, argName, java.io.File.class, argRequired, shortdoc, longdoc); this.defaultValue = value = defaultValue; } public java.io.File value () { return value; } public void parseArg (java.lang.String arg) { value = new java.io.File(arg); } public java.lang.String defaultValueToString() { return defaultValue == null ? null : defaultValue.toString(); } public java.lang.String valueToString () { return value == null ? null : value.toString(); }; } // Value is a string that can take on only a limited set of values public static class Set extends CommandOption { public java.lang.String value, defaultValue; java.lang.String[] setContents; java.lang.String contentsString; public Set (Class owner, java.lang.String name, java.lang.String argName, boolean argRequired, java.lang.String[] setContents, int defaultIndex, java.lang.String shortdoc, java.lang.String longdoc) { super (owner, name, argName, java.io.File.class, argRequired, shortdoc, longdoc); this.defaultValue = this.value = setContents[defaultIndex]; this.setContents = setContents; StringBuffer sb = new StringBuffer (); for (int i = 0; i < setContents.length; i++) { sb.append (setContents[i]); sb.append (","); } this.contentsString = sb.toString(); } public java.lang.String value () { return value; } public void parseArg (java.lang.String arg) { value = null; for (int i = 0; i < setContents.length; i++) if (setContents[i].equals(arg)) value = setContents[i]; if (value == null) throw new IllegalArgumentException ("Unrecognized option argument \""+arg+"\" not in set "+contentsString); } public java.lang.String defaultValueToString() { return defaultValue; } public java.lang.String valueToString() { return value; } } public static class Object extends CommandOption { public java.lang.Object value, defaultValue; public Object (Class owner, java.lang.String name, java.lang.String argName, boolean argRequired, java.lang.Object defaultValue, java.lang.String shortdoc, java.lang.String longdoc) { super (owner, name, argName, java.lang.Object.class, argRequired, shortdoc, longdoc); this.defaultValue = value = defaultValue; } public java.lang.Object value () { return value; } public void parseArg (java.lang.String arg) { try { value = interpreter.eval (arg); } catch (bsh.EvalError e) { throw new IllegalArgumentException ("Java interpreter eval error\n"+e); } } public java.lang.String defaultValueToString() { return defaultValue == null ? null : defaultValue.toString(); } public java.lang.String valueToString() { return value == null ? null : value.toString(); } } public static class ObjectFromBean extends Object { public ObjectFromBean (Class owner, java.lang.String name, java.lang.String argName, boolean argRequired, java.lang.Object defValue, java.lang.String shortdoc, java.lang.String longdoc) { super (owner, name, argName, argRequired, java.lang.Object.class, shortdoc, longdoc); defaultValue = value = defValue; } public java.lang.Object value () { return value; } public void parseArg (java.lang.String arg) { // parse something like MaxEntTrainer,gaussianPriorVariance=10,numIterations=20 //System.out.println("Arg = " + arg); // First, split the argument at commas. java.lang.String fields[] = arg.split(","); //Massage constructor name, so that MaxEnt, MaxEntTrainer, new MaxEntTrainer() // all call new MaxEntTrainer() java.lang.String constructorName = fields[0]; if (constructorName.contains("(") || constructorName.contains(";")) // if contains ( or a ;, pass it though super.parseArg(arg); else { super.parseArg("new " + constructorName + "()"); // use default constructor to make the object } // Call the appropriate set... methods that appeared comma-separated // find methods associated with the class we just built Method methods[] = this.value.getClass().getMethods(); // find setters corresponding to parameter names. for (int i=1; i<fields.length; i++){ java.lang.String nameValuePair[] = fields[i].split("="); java.lang.String parameterName = nameValuePair[0]; java.lang.String parameterValue = nameValuePair[1]; //todo: check for val present! java.lang.Object parameterValueObject; try { parameterValueObject = getInterpreter().eval(parameterValue); } catch (bsh.EvalError e) { throw new IllegalArgumentException ("Java interpreter eval error on parameter "+ parameterName + "\n"+e); } boolean foundSetter = false; for (int j=0; j<methods.length; j++){ // System.out.println("method " + j + " name is " + methods[j].getName()); // System.out.println("set" + Character.toUpperCase(parameterName.charAt(0)) + parameterName.substring(1)); if ( ("set" + Character.toUpperCase(parameterName.charAt(0)) + parameterName.substring(1)).equals(methods[j].getName()) && methods[j].getParameterTypes().length == 1){ // System.out.println("Matched method " + methods[j].getName()); // Class[] ptypes = methods[j].getParameterTypes(); // System.out.println("Parameter types:"); // for (int k=0; k<ptypes.length; k++){ // System.out.println("class " + k + " = " + ptypes[k].getName()); // } try { java.lang.Object[] parameterList = new java.lang.Object[]{parameterValueObject}; // System.out.println("Argument types:"); // for (int k=0; k<parameterList.length; k++){ // System.out.println("class " + k + " = " + parameterList[k].getClass().getName()); // } methods[j].invoke(this.value, parameterList); } catch ( IllegalAccessException e) { System.out.println("IllegalAccessException " + e); throw new IllegalArgumentException ("Java access error calling setter\n"+e); } catch ( InvocationTargetException e) { System.out.println("IllegalTargetException " + e); throw new IllegalArgumentException ("Java target error calling setter\n"+e); } foundSetter = true; break; } } if (!foundSetter){ System.out.println("Parameter " + parameterName + " not found on trainer " + constructorName); System.out.println("Available parameters for " + constructorName); for (int j=0; j<methods.length; j++){ if ( methods[j].getName().startsWith("set") && methods[j].getParameterTypes().length == 1){ System.out.println(Character.toLowerCase(methods[j].getName().charAt(3)) + methods[j].getName().substring(4)); } } throw new IllegalArgumentException ("no setter found for parameter " + parameterName); } } } public java.lang.String defaultValueToString() { return defaultValue == null ? null : defaultValue.toString(); } public java.lang.String valueToString() { return value == null ? null : value.toString(); } } }
27,009
34.30719
127
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/Univariate.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.util.logging.*; import cc.mallet.util.MalletLogger; // Obtained from http://www.stat.vt.edu/~sundar/java/code/Univariate.html // August 2002 /** * @(#)Univariate.java * * DAMAGE (c) 2000 by Sundar Dorai-Raj * * @author Sundar Dorai-Raj * * Email: [email protected] * * This program is free software; you can redistribute it and/or * * modify it under the terms of the GNU General Public License * * as published by the Free Software Foundation; either version 2 * * of the License, or (at your option) any later version, * * provided that any use properly credits the author. * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details at http://www.gnu.org * * */ public class Univariate { private static Logger logger = MalletLogger.getLogger(Univariate.class.getName()); private double[] x,sortx; private double[] summary=new double[6]; private boolean isSorted=false; public double[] five=new double[5]; private int n; private double mean,variance,stdev; private double median,min,Q1,Q3,max; public Univariate(double[] data) { x=(double[])data.clone(); n=x.length; createSummaryStats(); } private void createSummaryStats() { int i; mean=0; for(i=0;i<n;i++) mean+=x[i]; mean/=n; variance=variance(); stdev=stdev(); double sumxx=0; variance=0; for(i=0;i<n;i++) sumxx+=x[i]*x[i]; if(n>1) variance=(sumxx-n*mean*mean)/(n-1); stdev=Math.sqrt(variance); } public double[] summary() { summary[0]=n; summary[1]=mean; summary[2]=variance; summary[3]=stdev; summary[4]=Math.sqrt(variance/n); summary[5]=mean/summary[4]; return(summary); } public double mean() { return(mean); } public double variance() { return(variance); } public double stdev() { return(stdev); } public double SE() { return(Math.sqrt(variance/n)); } public double max() { if(!isSorted) sortx=sort(); return(sortx[n-1]); } public double min() { if(!isSorted) sortx=sort(); return(sortx[0]); } public double median() { return(quant(0.50)); } public double quant(double q) { if(!isSorted) sortx=sort(); if (q > 1 || q < 0) return (0); else { double index=(n+1)*q; if (index-(int)index == 0) return sortx[(int)index - 1]; else return q*sortx[(int)Math.floor(index)-1]+(1-q)*sortx[(int)Math.ceil(index)-1]; } } public double[] sort() { sortx=(double[])x.clone(); int incr=(int)(n*.5); while (incr >= 1) { for (int i=incr;i<n;i++) { double temp=sortx[i]; int j=i; while (j>=incr && temp<sortx[j-incr]) { sortx[j]=sortx[j-incr]; j-=incr; } sortx[j]=temp; } incr/=2; } isSorted=true; return(sortx); } public double[] getData() { return(x); } public int size() { return (n); } public double elementAt(int index) { double element=0; try { element=x[index]; } catch(ArrayIndexOutOfBoundsException e) { logger.info ("Index "+ index +" does not exist in data."); } return(element); } public double[] subset(int[] indices) { int k=indices.length,i=0; double elements[]=new double[k]; try { for(i=0;i<k;i++) elements[i]=x[k]; } catch(ArrayIndexOutOfBoundsException e) { logger.info ("Index "+ i +" does not exist in data."); } return(elements); } public int compare(double t) { int index=n-1; int i; boolean found=false; for(i=0;i<n && !found;i++) if(sortx[i]>t) { index=i; found=true; } return(index); } public int[] between(double t1,double t2) { int[] indices=new int[2]; indices[0]=compare(t1); indices[1]=compare(t2); return(indices); } public int indexOf(double element) { int index=-1; for(int i=0;i<n;i++) if(Math.abs(x[i]-element)<1e-6) index=i; return(index); } }
4,782
22.678218
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/RegexFileFilter.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.io.*; import java.util.regex.Pattern; import java.util.regex.Matcher; import cc.mallet.types.Alphabet; import cc.mallet.types.Label; public class RegexFileFilter implements FileFilter { Pattern absolutePathRegex; Pattern nameRegex; public RegexFileFilter (Pattern absolutePathRegex, Pattern filenameRegex) { this.absolutePathRegex = absolutePathRegex; this.nameRegex = filenameRegex; } public RegexFileFilter (String absolutePathRegex, String filenameRegex) { this (absolutePathRegex != null ? Pattern.compile (absolutePathRegex) : null, filenameRegex != null ? Pattern.compile (filenameRegex) : null); } public RegexFileFilter (Pattern nameRegex) { this (null, nameRegex); } public RegexFileFilter (String filenameRegex) { this (filenameRegex != null ? Pattern.compile (filenameRegex) : null); } public boolean accept (File f) { boolean ret = ((absolutePathRegex == null || absolutePathRegex.matcher(f.getAbsolutePath()).matches()) && (nameRegex == null || nameRegex.matcher(f.getName()).matches())); //System.out.println ("RegexFileFilter accept "+f+" nameRegex="+nameRegex.pattern()+" ret="+ret); return ret; } }
1,795
26.212121
99
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/PropertyList.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.util.Iterator; import java.util.HashSet; import java.util.HashMap; import java.io.*; public class PropertyList implements Serializable { protected PropertyList next; protected String key; public static PropertyList add (String key, Object value, PropertyList rest) { assert (key != null); return new ObjectProperty (key, value, rest); } public static PropertyList add (String key, String value, PropertyList rest) { assert (key != null); return new ObjectProperty (key, value, rest); } public static PropertyList add (String key, double value, PropertyList rest) { assert (key != null); return new NumericProperty (key, value, rest); } public static PropertyList remove (String key, PropertyList rest) { assert (key != null); return new ObjectProperty (key, null, rest); } public Object lookupObject (String key) { if (this.key.equals (key)) { if (this instanceof ObjectProperty) return ((ObjectProperty)this).value; else if (this instanceof NumericProperty) return new Double(((NumericProperty)this).value); else throw new IllegalStateException ("Unrecognitized PropertyList entry."); } else if (this.next == null) { return null; } else { return next.lookupObject (key); } } public double lookupNumber (String key) { if (this.key.equals (key)) { if (this instanceof NumericProperty) return ((NumericProperty)this).value; else if (this instanceof ObjectProperty) { Object obj = ((ObjectProperty)this).value; if (obj == null) return 0; // xxx Remove these? Use might ask for numericIterator expecting to get these (and not!) if (obj instanceof Double) return ((Double)obj).doubleValue(); if (obj instanceof Integer) return ((Double)obj).intValue(); if (obj instanceof Float) return ((Double)obj).floatValue(); if (obj instanceof Short) return ((Double)obj).shortValue(); if (obj instanceof Long) return ((Double)obj).longValue(); // xxx? throw new IllegalStateException ("Property is not numeric."); return 0; } else throw new IllegalStateException ("Unrecognitized PropertyList entry."); } else if (this.next == null) { return 0; } else { return next.lookupNumber (key); } } public boolean hasProperty (String key) { if (this.key.equals (key)) { if (this instanceof ObjectProperty && ((ObjectProperty)this).value == null) return false; else return true; } else if (this.next == null) { return false; } else { return next.hasProperty (key); } } public Iterator iterator () { return new Iterator (this); } public static PropertyList sumDuplicateKeyValues (PropertyList pl ) { return sumDuplicateKeyValues(pl,false); } // culotta 2/02/04: to increment counts of properties values. public static PropertyList sumDuplicateKeyValues (PropertyList pl, boolean ignoreZeros) { if (!(pl instanceof NumericProperty)) throw new IllegalArgumentException ("PropertyList must be Numeric to sum values"); HashMap key2value = new HashMap (); Iterator iter = pl.numericIterator(); while (iter.hasNext()) { iter.nextProperty (); String key = iter.getKey(); double val = iter.getNumericValue(); Double storedValue = (Double)key2value.get (key); if (storedValue == null) key2value.put (key, new Double (val)); else // sum stored value with current value key2value.put (key, new Double (storedValue.doubleValue() + val)); } PropertyList ret = null; java.util.Iterator hashIter = key2value.keySet().iterator(); while (hashIter.hasNext()) { // create new property list String key = (String) hashIter.next(); double val = ((Double)key2value.get (key)).doubleValue(); if(ignoreZeros && val==0.0) continue; ret = PropertyList.add (key, val, ret); } return ret; } public Iterator numericIterator () { return new NumericIterator (this); } public Iterator objectIterator () { return new ObjectIterator (this); } protected PropertyList () { throw new IllegalArgumentException ("Zero args constructor not allowed."); } protected PropertyList (String key, PropertyList rest) { this.key = key; this.next = rest; } public void print () { if (this instanceof NumericProperty) System.out.println (this.key.toString() + "=" + ((NumericProperty)this).value); else if (this instanceof ObjectProperty) System.out.println (this.key.toString() + "=" + ((ObjectProperty)this).value); else throw new IllegalArgumentException ("Unrecognized PropertyList type"); if (this.next != null) this.next.print(); } // Serialization // PropertyList private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeObject(next); out.writeObject(key); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); next = (PropertyList) in.readObject(); key = (String) in.readObject(); } public int size () { PropertyList pl = this; int size = 1; while (pl.next != null) { pl = pl.next; size++; } return size; } private static class NumericProperty extends PropertyList implements Serializable { protected double value; public NumericProperty (String key, double value, PropertyList rest) { super (key, rest); this.value = value; } // Serialization // NumericProperty private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); out.writeDouble(value); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt(); value = in.readDouble(); } } private static class ObjectProperty extends PropertyList { protected Object value; public ObjectProperty (String key, Object value, PropertyList rest) { super (key, rest); this.value = value; } // Serialization // ObjectProperty private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt(CURRENT_SERIAL_VERSION); out.writeObject(value); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt(); value = (Object) in.readObject(); } } public class Iterator implements java.util.Iterator, Serializable { PropertyList property, nextProperty; HashSet deletedKeys = null; boolean nextCalled = false; boolean returnNumeric = true; boolean returnObject = true; public Iterator (PropertyList pl) { property = findReturnablePropertyAtOrAfter (pl); if (property == null) nextProperty = null; else nextProperty = findReturnablePropertyAtOrAfter (property.next); } private PropertyList findReturnablePropertyAtOrAfter (PropertyList property) { while (property != null) { if (property instanceof NumericProperty && returnNumeric) { if (((NumericProperty)property).value == 0.0) { if (deletedKeys == null) deletedKeys = new HashSet(); deletedKeys.add (property.key); property = property.next; } else break; } else if (property instanceof ObjectProperty && returnObject) { if (((ObjectProperty)property).value == null) { if (deletedKeys == null) deletedKeys = new HashSet(); deletedKeys.add (property.key); property = property.next; } else break; } else throw new IllegalStateException ("Unrecognized property type "+property.getClass().getName()); } return property; } public boolean hasNext () { return ((nextCalled && nextProperty != null) || (!nextCalled && property != null)); } public boolean isNumeric () { return (property instanceof NumericProperty); } public double getNumericValue () { return ((NumericProperty)property).value; } public Object getObjectValue () { return ((ObjectProperty)property).value; } public String getKey () { return property.key; } public PropertyList nextProperty () { if (nextCalled) { property = nextProperty; nextProperty = findReturnablePropertyAtOrAfter (property.next); } else nextCalled = true; return property; } public Object next () { return nextProperty (); } public void remove () { throw new UnsupportedOperationException (); } // Serialization // Iterator private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeObject(property); out.writeObject(nextProperty); out.writeObject(deletedKeys); out.writeBoolean(nextCalled); out.writeBoolean(returnNumeric); out.writeBoolean(returnObject); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); property = (PropertyList) in.readObject(); nextProperty = (PropertyList) in.readObject(); deletedKeys = (HashSet) in.readObject(); nextCalled = in.readBoolean(); returnNumeric = in.readBoolean(); returnObject = in.readBoolean(); } } public class NumericIterator extends Iterator implements Serializable { public NumericIterator (PropertyList pl) { super (pl); this.returnObject = false; } } public class ObjectIterator extends Iterator implements Serializable { public ObjectIterator (PropertyList pl) { super (pl); this.returnNumeric = false; } } // for fast merging of PropertLists // gmann 8/14/6 public PropertyList last(){ if (next == null){ return this; } else return next.last(); } public PropertyList append(PropertyList nextPl) throws UnsupportedOperationException{ if (this.next != null){ throw new UnsupportedOperationException("PropertyList.java: Cannot append to middle of a list\n"); } this.next = nextPl; return last(); } }
11,015
25.417266
103
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/Addable.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; public interface Addable { public boolean add (Object o); }
609
28.047619
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/ProgressMessageLogFormatter.java
package cc.mallet.util; import java.util.logging.*; import java.util.Arrays; /** * Format ProgressMessages destined for screen. * Progress messages are repetitive messages, of which only * the last one is generally of interest. * Progress messages are a subclass of LogRecord, generated * by a progressMessageLogger. * When printing a progress message, we carriage return but * supress the line-feed. * If we get a message that is not a progressMessage, print * it the same way PlainLogFormatter does. * todo: capture the formatter that was on the console * (usually a plainlogformatter) and defer to it when * needed. */ public class ProgressMessageLogFormatter extends SimpleFormatter { boolean lastMessageWasProgressMessage=false; int lastProgressMessageLength=0; public ProgressMessageLogFormatter() { super(); } public String format (LogRecord record) { int length = record.getMessage().length(); if (record instanceof ProgressMessageLogRecord){ String suffix = ""; if (lastMessageWasProgressMessage && lastProgressMessageLength>length){ // pad with trailing blanks if previous message was shorter than ours. int padding = lastProgressMessageLength-length; char []c = new char[padding]; Arrays.fill(c, ' '); suffix = new String(c); } lastMessageWasProgressMessage = true; lastProgressMessageLength = length; return record.getMessage() + suffix + "\r"; }else{ String prefix = lastMessageWasProgressMessage? "\n" : ""; lastMessageWasProgressMessage = false; return prefix + record.getMessage()+ "\n"; } } }
1,592
31.510204
74
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/BshInterpreter.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.util; import java.io.*; import java.util.*; import bsh.Interpreter; public class BshInterpreter extends bsh.Interpreter { Interpreter interpreter; public BshInterpreter (String prefixCommands) { try { eval ( "import java.util.*;"+ "import java.util.regex.*;"+ "import java.io.*;"+ "import cc.mallet.types.*;"+ "import cc.mallet.pipe.*;"+ "import cc.mallet.pipe.iterator.*;"+ "import cc.mallet.pipe.tsf.*;"+ "import cc.mallet.classify.*;"+ "import cc.mallet.extract.*;"+ "import cc.mallet.fst.*;"+ "import cc.mallet.optimize.*;"); if (prefixCommands != null) eval (prefixCommands); } catch (bsh.EvalError e) { throw new IllegalArgumentException ("bsh Interpreter error: "+e); } } public BshInterpreter () { this (null); } }
1,238
25.361702
76
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/ArrayUtils.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.util; import gnu.trove.TDoubleProcedure; import gnu.trove.TObjectProcedure; import java.lang.reflect.Array; /** * Static utility methods for arrays * (like java.util.Arrays, but more useful). * * @author <a href="mailto:[email protected]">Charles Sutton</a> * @version $Id: ArrayUtils.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $ */ final public class ArrayUtils { private ArrayUtils() {} public static int indexOf (Object[] array, Object obj) { for (int i = 0; i < array.length; i++) { if ((array[i] != null) && array[i].equals (obj)) { return i; } } return -1; } public static int indexOf (int[] array, int obj) { for (int i = 0; i < array.length; i++) { if (array[i] == obj) { return i; } } return -1; } /** * Returns true if the procedure proc returns true for any * element of the array v. */ public static boolean any (TDoubleProcedure proc, double[] v) { for (int i = 0; i < v.length; i++) { if (proc.execute (v[i])) { return true; } } return false; } /** * Returns true if the procedure proc returns true for any * element of the array v. */ public static boolean any (TObjectProcedure proc, Object[][] v) { for (int i = 0; i < v.length; i++) { for (int j = 0; j < v[i].length; j++) { if (proc.execute (v[i][j])) { return true; } } } return false; } public static void forEach (TObjectProcedure proc, Object[] v) { for (int i = 0; i < v.length; i++) { proc.execute (v[i]); } } public static void forEach (TObjectProcedure proc, Object[][] v) { for (int i = 0; i < v.length; i++) { for (int j = 0; j < v[i].length; j++) { proc.execute (v[i][j]); } } } public static void print (double[] v) { System.out.print ("["); for (int i = 0; i < v.length; i++) System.out.print (" " + v[i]); System.out.println (" ]"); } public static void print (int[] v) { System.out.print ("["); for (int i = 0; i < v.length; i++) System.out.print (" " + v[i]); System.out.println (" ]"); } public static String toString (int[] v) { StringBuffer buf = new StringBuffer (); for (int i = 0; i < v.length; i++) { buf.append (v[i]); if (i < v.length - 1) buf.append (" "); } return buf.toString (); } public static String toString (double[] v) { StringBuffer buf = new StringBuffer (); for (int i = 0; i < v.length; i++) { buf.append (v[i]); if (i < v.length - 1) buf.append (" "); } return buf.toString (); } public static String toString (Object[] v) { StringBuffer buf = new StringBuffer (); for (int i = 0; i < v.length; i++) { buf.append (v[i]); if (i < v.length - 1) buf.append (" "); } return buf.toString (); } /** * Returns a new array containing all of a, with additional extra space added (zero initialized). * @param a * @param additional * @return */ public static int[] extend (int[] a, int additional) { int[] ret = new int[a.length + additional]; System.arraycopy(a, 0, ret, 0, a.length); return ret; } /** * Returns a new array containing all of a, with additional extra space added (zero initialized). * @param a * @param additional * @return */ public static double[] extend (double[] a, int additional) { double[] ret = new double[a.length + additional]; System.arraycopy(a, 0, ret, 0, a.length); return ret; } /** * Returns a new array that is the concatenation of a1 and a2. * @param a1 * @param a2 * @return */ public static int[] append (int[] a1, int[] a2) { int[] ret = new int[a1.length + a2.length]; System.arraycopy(a1, 0, ret, 0, a1.length); System.arraycopy(a2, 0, ret, a1.length, a2.length); return ret; } /** * Returns a new array that is the concatenation of a1 and a2. * @param a1 * @param a2 * @return */ public static double[] append (double[] a1, double[] a2) { double[] ret = new double[a1.length + a2.length]; System.arraycopy(a1, 0, ret, 0, a1.length); System.arraycopy(a2, 0, ret, a1.length, a2.length); return ret; } /** * Returns a new array with a single element appended at the end. * Use this sparingly, for it will allocate a new array. You can * easily turn a linear-time algorithm to quadratic this way. * @param v Original array * @param elem Element to add to end */ public static int[] append (int[] v, int elem) { int[] ret = new int [v.length + 1]; System.arraycopy (v, 0, ret, 0, v.length); ret[v.length] = elem; return ret; } /** * Returns a new array with a single element appended at the end. * Use this sparingly, for it will allocate a new array. You can * easily turn a linear-time algorithm to quadratic this way. * @param v Original array * @param elem Element to add to end */ public static boolean[] append (boolean[] v, boolean elem) { boolean[] ret = new boolean [v.length + 1]; System.arraycopy (v, 0, ret, 0, v.length); ret[v.length] = elem; return ret; } /** * Returns a new array with a single element appended at the end. * Use this sparingly, for it will allocate a new array. You can * easily turn a linear-time algorithm to quadratic this way. * @param v Original array * @param elem Element to add to end * @return Array with length v+1 that is (v0,v1,...,vn,elem). * Runtime type will be same as he pased-in array. */ public static Object[] append (Object[] v, Object elem) { Object[] ret = (Object[]) Array.newInstance (v.getClass().getComponentType(), v.length+1); System.arraycopy (v, 0, ret, 0, v.length); ret[v.length] = elem; return ret; } /* public static Object[] cloneArray (Cloneable[] arr) { // Do this magic so that it can be cast to original type when done Object[] aNew = (Object[]) Array.newInstance (arr.getClass().getComponentType(), arr.length); for (int i = 0; i < arr.length; i++) { aNew [i] = arr[i].clone (); } return aNew; } */ /** Returns the number of times a value occurs in a given array. */ public static double count (int[] sampled, int val) { int count = 0; for (int i = 0; i < sampled.length; i++) { if (sampled[i] == val) { count++; } } return count; } public static int argmax (double [] elems) { int bestIdx = -1; double max = Double.NEGATIVE_INFINITY; for (int i = 0; i < elems.length; i++) { double elem = elems[i]; if (elem > max) { max = elem; bestIdx = i; } } return bestIdx; } public static boolean equals (boolean[][] m1, boolean[][] m2) { if (m1.length != m2.length) return false; for (int i = 0; i < m1.length; i++) { if (m1[i].length != m2[i].length) return false; for (int j = 0; j < m1[i].length; j++) { boolean b1 = m1[i][j]; boolean b2 = m2[i][j]; if (b1 != b2) return false; } } return true; } } // Arrays
7,563
24.640678
99
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/DoubleList.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** A dynamically growable list of doubles. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.util.Arrays; import java.io.*; public class DoubleList implements Serializable { double[] data; int size; public DoubleList () { this (2); } // Creates a list of zero size public DoubleList (int capacity) { if (capacity < 2) capacity = 2; this.data = new double[capacity]; this.size = 0; } public DoubleList (int size, double fillValue) { int capacity = size; if (capacity < 2) capacity = 2; this.data = new double[capacity]; Arrays.fill (this.data, fillValue); this.size = size; } public DoubleList (double[] initialValues, int size) { this.data = new double[initialValues.length]; System.arraycopy (initialValues, 0, this.data, 0, initialValues.length); this.size = size; } public DoubleList (double[] initialValues) { this (initialValues, initialValues.length); } public DoubleList cloneDoubleList () { return new DoubleList (data, size); } public Object clone () { return cloneDoubleList (); } private void growIfNecessary (int index) { int newDataLength = data.length; while (index >= newDataLength) { if (newDataLength < 100) newDataLength *= 2; else newDataLength = (newDataLength * 3) / 2; } if (newDataLength != data.length) { double[] newData = new double[newDataLength]; System.arraycopy (data, 0, newData, 0, data.length); data = newData; } } public void add (double value) { growIfNecessary (size); data[size++] = value; } public double get (int index) { if (index >= size) throw new IllegalArgumentException ("Index "+index+" out of bounds; size="+size); return data[index]; } public void set (int index, double value) { growIfNecessary (index); data[index] = value; if (index >= size) size = index+1; } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); int size = data.length; out.writeInt(size); for (int i=1; i<size; i++) { out.writeDouble(data[i]); } out.writeInt(this.size); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); int len = in.readInt(); data = new double[len]; for (int i = 1; i<len; i++) { data[i] = in.readDouble(); } size = in.readInt(); } }
3,018
21.036496
92
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/ArrayListUtils.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.util.ArrayList; public class ArrayListUtils { // xxx Why not just use java.util.Arrays.asList (Object[] a) -cas static public ArrayList createArrayList (Object[] a) { ArrayList al = new ArrayList (a.length); for (int i = 0; i < a.length; i++) al.add (a[i]); return al; } // Useful until java 1.5 -ghuang static public int[] toIntArray (ArrayList list) { int[] result = new int[list.size()]; for (int i = 0; i < list.size(); i++) { Number n = (Number) list.get(i); result[i] = n.intValue(); } return result; } // Useful until java 1.5 -ghuang static public double[] toDoubleArray (ArrayList list) { double[] result = new double[list.size()]; for (int i = 0; i < list.size(); i++) { Number n = (Number) list.get(i); result[i] = n.doubleValue(); } return result; } }
1,384
23.298246
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/MalletProgressMessageLogger.java
package cc.mallet.util; import java.util.logging.*; /** * Created by IntelliJ IDEA. * User: hough * Date: Feb 10, 2004 * Time: 4:37:07 PM * To change this template use Options | File Templates. */ public class MalletProgressMessageLogger extends MalletLogger{ protected MalletProgressMessageLogger (String name, String resourceBundleName) { super (name, resourceBundleName); } public static Logger getLogger (String name) { MalletProgressMessageLogger mpml = new MalletProgressMessageLogger(name, null); LogManager.getLogManager().addLogger(mpml); return mpml; } public void log(LogRecord logRecord) { // convert to subclass of logRecord, and pass it on.. // I'm sure this is losing information... //System.out.println("MPML log record entered " +logRecord); ProgressMessageLogRecord progressMessageLogRecord = new ProgressMessageLogRecord(logRecord); super.log(progressMessageLogRecord); // // //getParent().log(progressMessageLogRecord); // //try doing the dispatch ourselves. Fewer classes to override... // // Whole reason for overriding is so we can not send things to console twice // // once in progress message; once in parent.. // // // I think this is some approximation of what java.util.logger.log() does // //todo: add level test and filtering // // Logger currentLogger = this; // boolean sentToConsole = false; // boolean useParentHandlers = getUseParentHandlers(); // while (currentLogger != null){ // Handler[] handlers = currentLogger.getHandlers(); // for(int i=0; i<handlers.length; i++){ // if (handlers[i] instanceof ConsoleHandler){ // if (!sentToConsole){ // handlers[i].publish(progressMessageLogRecord); // sentToConsole = true; // } // }else{ // handlers[i].publish(progressMessageLogRecord); // } // } // // if (useParentHandlers) { // currentLogger = currentLogger.getParent(); // } else { // currentLogger = null; // } // } } }
1,971
28.432836
94
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/Strings.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.util; import java.util.Arrays; /** * Static utility methods for Strings */ final public class Strings { public static int commonPrefixIndex (String[] strings) { int prefixLen = strings[0].length(); for (int i = 1; i < strings.length; i++) { if (strings[i].length() < prefixLen) prefixLen = strings[i].length(); int j = 0; if (prefixLen == 0) return 0; while (j < prefixLen) { if (strings[i-1].charAt(j) != strings[i].charAt(j)) { prefixLen = j; break; } j++; } } return prefixLen; } public static String commonPrefix (String[] strings) { return strings[0].substring (0, commonPrefixIndex(strings)); } public static int count (String string, char ch) { int idx = -1; int count = 0; while ((idx = string.indexOf (ch, idx+1)) >= 0) { count++; }; return count; } public static double levenshteinDistance (String s, String t) { int n = s.length(); int m = t.length(); int d[][]; // matrix int i; // iterates through s int j; // iterates through t char s_i; // ith character of s char t_j; // jth character of t int cost; // cost if (n == 0) return 1.0; if (m == 0) return 1.0; d = new int[n+1][m+1]; for (i = 0; i <= n; i++) d[i][0] = i; for (j = 0; j <= m; j++) d[0][j] = j; for (i = 1; i <= n; i++) { s_i = s.charAt (i - 1); for (j = 1; j <= m; j++) { t_j = t.charAt (j - 1); cost = (s_i == t_j) ? 0 : 1; d[i][j] = minimum (d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1] + cost); } } int longer = (n > m) ? n : m; return (double)d[n][m] / longer; // Normalize to 0-1. } private static int minimum (int a, int b, int c) { int mi = a; if (b < mi) { mi = b; } if (c < mi) { mi = c; } return mi; } }
2,282
21.83
76
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/Randoms.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.util.*; public class Randoms extends java.util.Random { public Randoms (int seed) { super(seed); } public Randoms () { super(); } /** Return random integer from Poission with parameter lambda. * The mean of this distribution is lambda. The variance is lambda. */ public synchronized int nextPoisson(double lambda) { int i,j,v=-1; double l=Math.exp(-lambda),p; p=1.0; while (p>=l) { p*=nextUniform(); v++; } return v; } /** Return nextPoisson(1). */ public synchronized int nextPoisson() { return nextPoisson(1); } /** Return a random boolean, equally likely to be true or false. */ public synchronized boolean nextBoolean() { return (next(32) & 1 << 15) != 0; } /** Return a random boolean, with probability p of being true. */ public synchronized boolean nextBoolean(double p) { double u=nextUniform(); if(u < p) return true; return false; } /** Return a random BitSet with "size" bits, each having probability p of being true. */ public synchronized BitSet nextBitSet (int size, double p) { BitSet bs = new BitSet (size); for (int i = 0; i < size; i++) if (nextBoolean (p)) { bs.set (i); } return bs; } /** Return a random double in the range 0 to 1, inclusive, uniformly sampled from that range. * The mean of this distribution is 0.5. The variance is 1/12. */ public synchronized double nextUniform() { long l = ((long)(next(26)) << 27) + next(27); return l / (double)(1L << 53); } /** Return a random double in the range a to b, inclusive, uniformly sampled from that range. * The mean of this distribution is (b-a)/2. The variance is (b-a)^2/12 */ public synchronized double nextUniform(double a,double b) { return a + (b-a)*nextUniform(); } /** Draw a single sample from multinomial "a". */ public synchronized int nextDiscrete (double[] a) { double b = 0, r = nextUniform(); for (int i = 0; i < a.length; i++) { b += a[i]; if (b > r) { return i; } } return a.length-1; } /** draw a single sample from (unnormalized) multinomial "a", with normalizing factor "sum". */ public synchronized int nextDiscrete (double[] a, double sum) { double b = 0, r = nextUniform() * sum; for (int i = 0; i < a.length; i++) { b += a[i]; if (b > r) { return i; } } return a.length-1; } private double nextGaussian; private boolean haveNextGaussian = false; /** Return a random double drawn from a Gaussian distribution with mean 0 and variance 1. */ public synchronized double nextGaussian() { if (!haveNextGaussian) { double v1=nextUniform(),v2=nextUniform(); double x1,x2; x1=Math.sqrt(-2*Math.log(v1))*Math.cos(2*Math.PI*v2); x2=Math.sqrt(-2*Math.log(v1))*Math.sin(2*Math.PI*v2); nextGaussian=x2; haveNextGaussian=true; return x1; } else { haveNextGaussian=false; return nextGaussian; } } /** Return a random double drawn from a Gaussian distribution with mean m and variance s2. */ public synchronized double nextGaussian(double m,double s2) { return nextGaussian()*Math.sqrt(s2)+m; } // generate Gamma(1,1) // E(X)=1 ; Var(X)=1 /** Return a random double drawn from a Gamma distribution with mean 1.0 and variance 1.0. */ public synchronized double nextGamma() { return nextGamma(1,1,0); } /** Return a random double drawn from a Gamma distribution with mean alpha and variance 1.0. */ public synchronized double nextGamma(double alpha) { return nextGamma(alpha,1,0); } /* Return a sample from the Gamma distribution, with parameter IA */ /* From Numerical "Recipes in C", page 292 */ public synchronized double oldNextGamma (int ia) { int j; double am, e, s, v1, v2, x, y; assert (ia >= 1) ; if (ia < 6) { x = 1.0; for (j = 1; j <= ia; j++) x *= nextUniform (); x = - Math.log (x); } else { do { do { do { v1 = 2.0 * nextUniform () - 1.0; v2 = 2.0 * nextUniform () - 1.0; } while (v1 * v1 + v2 * v2 > 1.0); y = v2 / v1; am = ia - 1; s = Math.sqrt (2.0 * am + 1.0); x = s * y + am; } while (x <= 0.0); e = (1.0 + y * y) * Math.exp (am * Math.log (x/am) - s * y); } while (nextUniform () > e); } return x; } /** Return a random double drawn from a Gamma distribution with mean alpha*beta and variance alpha*beta^2. */ public synchronized double nextGamma(double alpha, double beta) { return nextGamma(alpha,beta,0); } /** Return a random double drawn from a Gamma distribution * with mean alpha*beta+lamba and variance alpha*beta^2. * Note that this means the pdf is: * <code>frac{ x^{alpha-1} exp(-x/beta) }{ beta^alpha Gamma(alpha) }</code> * in other words, beta is a "scale" parameter. An alternative * parameterization would use 1/beta, the "rate" parameter. */ public synchronized double nextGamma(double alpha, double beta, double lambda) { double gamma=0; if (alpha <= 0 || beta <= 0) { throw new IllegalArgumentException ("alpha and beta must be strictly positive."); } if (alpha < 1) { double b,p; boolean flag = false; b = 1 + alpha * Math.exp(-1); while (!flag) { p = b * nextUniform(); if (p > 1) { gamma = -Math.log((b - p) / alpha); if (nextUniform() <= Math.pow(gamma, alpha - 1)) { flag = true; } } else { gamma = Math.pow(p, 1.0/alpha); if (nextUniform() <= Math.exp(-gamma)) { flag = true; } } } } else if (alpha == 1) { // Gamma(1) is equivalent to Exponential(1). We can // sample from an exponential by inverting the CDF: gamma = -Math.log (nextUniform ()); // There is no known closed form for Gamma(alpha != 1)... } else { // This is Best's algorithm: see pg 410 of // Luc Devroye's "non-uniform random variate generation" // This algorithm is constant time for alpha > 1. double b = alpha - 1; double c = 3 * alpha - 0.75; double u, v; double w, y, z; boolean accept = false; while (! accept) { u = nextUniform(); v = nextUniform(); w = u * (1 - u); y = Math.sqrt( c / w ) * (u - 0.5); gamma = b + y; if (gamma >= 0.0) { z = 64 * w * w * w * v * v; // ie: 64 * w^3 v^2 accept = z <= 1.0 - ((2 * y * y) / gamma); if (! accept) { accept = (Math.log(z) <= 2 * (b * Math.log(gamma / b) - y)); } } } /* // Old version, uses time linear in alpha double y = -Math.log (nextUniform ()); while (nextUniform () > Math.pow (y * Math.exp (1 - y), alpha - 1)) y = -Math.log (nextUniform ()); gamma = alpha * y; */ } return beta*gamma+lambda; } /** Return a random double drawn from an Exponential distribution with mean 1 and variance 1. */ public synchronized double nextExp() { return nextGamma(1,1,0); } /** Return a random double drawn from an Exponential distribution with mean beta and variance beta^2. */ public synchronized double nextExp(double beta) { return nextGamma(1,beta,0); } /** Return a random double drawn from an Exponential distribution with mean beta+lambda and variance beta^2. */ public synchronized double nextExp(double beta,double lambda) { return nextGamma(1,beta,lambda); } /** Return a random double drawn from an Chi-squarted distribution with mean 1 and variance 2. * Equivalent to nextChiSq(1) */ public synchronized double nextChiSq() { return nextGamma(0.5,2,0); } /** Return a random double drawn from an Chi-squared distribution with mean df and variance 2*df. */ public synchronized double nextChiSq(int df) { return nextGamma(0.5*(double)df,2,0); } /** Return a random double drawn from an Chi-squared distribution with mean df+lambda and variance 2*df. */ public synchronized double nextChiSq(int df,double lambda) { return nextGamma(0.5*(double)df,2,lambda); } /** Return a random double drawn from a Beta distribution with mean a/(a+b) and variance ab/((a+b+1)(a+b)^2). */ public synchronized double nextBeta(double alpha,double beta) { if (alpha <= 0 || beta <= 0) { throw new IllegalArgumentException ("alpha and beta must be strictly positive."); } if (alpha == 1 && beta == 1) { return nextUniform (); } else if (alpha >= 1 && beta >= 1) { double A = alpha - 1, B = beta - 1, C = A + B, L = C * Math.log (C), mu = A / C, sigma = 0.5 / Math.sqrt (C); double y = nextGaussian (), x = sigma * y + mu; while (x < 0 || x > 1) { y = nextGaussian (); x = sigma * y + mu; } double u = nextUniform (); while (Math.log (u) >= A * Math.log (x / A) + B * Math.log ((1 - x) / B) + L + 0.5 * y * y) { y = nextGaussian (); x = sigma * y + mu; while (x < 0 || x > 1) { y = nextGaussian (); x = sigma * y + mu; } u = nextUniform (); } return x; } else { double v1 = Math.pow (nextUniform (), 1 / alpha), v2 = Math.pow (nextUniform (), 1 / beta); while (v1 + v2 > 1) { v1 = Math.pow (nextUniform (), 1 / alpha); v2 = Math.pow (nextUniform (), 1 / beta); } return v1 / (v1 + v2); } } /** Wrap this instance as a java random, so it can be passed to legacy methods. * All methods of the returned java.util.Random object will affect the state of * this object, as well. */ @Deprecated // This should no longer be necessary, since Randoms is now a subclass of java.util.random anyway public java.util.Random asJavaRandom () { return new java.util.Random () { protected int next (int bits) { return cc.mallet.util.Randoms.this.next (bits); } }; } public static void main (String[] args) { // Prints the nextGamma() and oldNextGamma() distributions to // System.out for testing/comparison. Randoms r = new Randoms(); final int resolution = 60; int[] histogram1 = new int[resolution]; int[] histogram2 = new int[resolution]; int scale = 10; for (int i = 0; i < 10000; i++) { double x = 4; int index1 = ((int)(r.nextGamma(x)/scale * resolution)) % resolution; int index2 = ((int)(r.oldNextGamma((int)x)/scale * resolution)) % resolution; histogram1[index1]++; histogram2[index2]++; } for (int i = 0; i < resolution; i++) { for (int y = 0; y < histogram1[i]/scale; y++) System.out.print("*"); System.out.print("\n"); for (int y = 0; y < histogram2[i]/scale; y++) System.out.print("-"); System.out.print("\n"); } } }
11,421
27.989848
115
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/CharSequenceLexer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.io.*; import java.lang.CharSequence; import java.util.Iterator; import java.util.regex.Pattern; import java.util.regex.Matcher; import java.io.*; import cc.mallet.util.Lexer; public class CharSequenceLexer implements Lexer, Serializable { // Some predefined lexing rules public static final Pattern LEX_ALPHA = Pattern.compile ("\\p{Alpha}+"); public static final Pattern LEX_WORDS = Pattern.compile ("\\w+"); public static final Pattern LEX_NONWHITESPACE_TOGETHER = Pattern.compile ("\\S+"); public static final Pattern LEX_WORD_CLASSES = Pattern.compile ("\\p{Alpha}+|\\p{Digit}+"); public static final Pattern LEX_NONWHITESPACE_CLASSES = Pattern.compile ("\\p{Alpha}+|\\p{Digit}+|\\p{Punct}"); // Lowercase letters and uppercase letters public static final Pattern UNICODE_LETTERS = Pattern.compile("[\\p{Ll}&&\\p{Lu}]+"); Pattern regex; Matcher matcher = null; CharSequence input; String matchText; boolean matchTextFresh; public CharSequenceLexer () { this (LEX_ALPHA); } public CharSequenceLexer (Pattern regex) { this.regex = regex; setCharSequence (null); } public CharSequenceLexer (String regex) { this (Pattern.compile (regex)); } public CharSequenceLexer (CharSequence input, Pattern regex) { this (regex); setCharSequence (input); } public CharSequenceLexer (CharSequence input, String regex) { this (input, Pattern.compile (regex)); } public void setCharSequence (CharSequence input) { this.input = input; this.matchText = null; this.matchTextFresh = false; if (input != null) this.matcher = regex.matcher(input); } public CharSequence getCharSequence() { return input; } public String getPattern() { return regex.pattern(); } public void setPattern(String reg)// added by Fuchun { if(!regex.equals( getPattern() )){ this.regex = Pattern.compile(reg); // this.matcher = regex.matcher(input); } } public int getStartOffset () { if (matchText == null) return -1; return matcher.start(); } public int getEndOffset () { if (matchText == null) return -1; return matcher.end(); } public String getTokenString () { return matchText; } // Iterator interface methods private void updateMatchText () { if (matcher != null && matcher.find()) { matchText = matcher.group(); if (matchText.length() == 0) { // xxx Why would this happen? // It is happening to me when I use the regex ".*" in an attempt to make // Token's out of entire lines of text. -akm. updateMatchText(); //System.err.println ("Match text is empty!"); } //matchText = input.subSequence (matcher.start(), matcher.end()).toString (); } else matchText = null; matchTextFresh = true; } public boolean hasNext () { if (! matchTextFresh) updateMatchText (); return (matchText != null); } public Object next () { if (! matchTextFresh) updateMatchText (); matchTextFresh = false; return matchText; } public void remove () { throw new UnsupportedOperationException (); } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); // xxx hmph... Pattern.java seems to have serialization // problems. Work around: serialize the String and flags // representing the regex, and recompile Pattern. if (CURRENT_SERIAL_VERSION == 0) out.writeObject (regex); else if (CURRENT_SERIAL_VERSION == 1) { out.writeObject (regex.pattern()); out.writeInt (regex.flags()); //out.writeBoolean(matchTextFresh); } out.writeBoolean (matchTextFresh); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); if (version == 0) regex = (Pattern) in.readObject(); else if (version == 1) { String p = (String) in.readObject(); int flags = in.readInt(); regex = Pattern.compile (p, flags); } matchTextFresh = in.readBoolean(); } public static void main (String[] args) { try { BufferedReader in = new BufferedReader(new FileReader(args[0])); for (String line = in.readLine(); line != null; line = in.readLine()) { CharSequenceLexer csl = new CharSequenceLexer (line, LEX_NONWHITESPACE_CLASSES ); while (csl.hasNext()) System.out.println (csl.next()); } } catch (Exception e) { System.out.println (e.toString()); } } }
5,069
23.258373
92
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/ProgressMessageLogRecord.java
package cc.mallet.util; import java.util.logging.LogRecord; /** * A log message that is to be written in place (no newline) * if the message is headed for the user's terminal. */ public class ProgressMessageLogRecord extends LogRecord { public ProgressMessageLogRecord(LogRecord logRecord) { super(logRecord.getLevel(), logRecord.getMessage()); } }
361
20.294118
60
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/IoUtils.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.io.*; public class IoUtils { public static CharSequence contentsAsCharSequence (Reader reader) throws java.io.IOException { final int BUFSIZE = 2048; char[] buf = new char[BUFSIZE]; int count; StringBuffer sb = new StringBuffer (BUFSIZE); do { count = reader.read (buf, 0, BUFSIZE); if (count == -1) break; //System.out.println ("count="+count); sb.append (buf, 0, count); } while (count == BUFSIZE); return sb; } public static String contentsAsString (File f) throws java.io.IOException { assert (f != null); return contentsAsCharSequence (new BufferedReader (new FileReader (f))).toString(); } }
1,203
25.755556
93
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/BulkLoader.java
package cc.mallet.util; import cc.mallet.types.*; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.*; import java.util.*; import java.io.*; /** * This class reads through a single file, breaking each line * into data and (optional) name and label fields. */ public class BulkLoader { static CommandOption.File inputFile = new CommandOption.File (BulkLoader.class, "input", "FILE", true, null, "The file containing data, one instance per line", null); static CommandOption.File outputFile = new CommandOption.File (BulkLoader.class, "output", "FILE", true, new File("mallet.data"), "Write the instance list to this file", null); static CommandOption.Boolean preserveCase = new CommandOption.Boolean (BulkLoader.class, "preserve-case", "[TRUE|FALSE]", false, false, "If true, do not force all strings to lowercase.", null); static CommandOption.Boolean removeStopWords = new CommandOption.Boolean (BulkLoader.class, "remove-stopwords", "[TRUE|FALSE]", false, false, "If true, remove common \"stop words\" from the text.\nThis option invokes a minimal English stoplist. ", null); static CommandOption.File stoplistFile = new CommandOption.File (BulkLoader.class, "stoplist", "FILE", true, null, "Read newline-separated words from this file,\n and remove them from text. This option overrides\n the default English stoplist triggered by --remove-stopwords.", null); static CommandOption.Boolean keepSequence = new CommandOption.Boolean (BulkLoader.class, "keep-sequence", "[TRUE|FALSE]", false, false, "If true, final data will be a FeatureSequence rather than a FeatureVector.", null); static CommandOption.String lineRegex = new CommandOption.String (BulkLoader.class, "line-regex", "REGEX", true, "^([^\\t]*)\\t([^\\t]*)\\t(.*)", "Regular expression containing regex-groups for label, name and data.", null); static CommandOption.Integer nameGroup = new CommandOption.Integer (BulkLoader.class, "name", "INTEGER", true, 1, "The index of the group containing the instance name.\n Use 0 to indicate that this field is not used.", null); static CommandOption.Integer labelGroup = new CommandOption.Integer (BulkLoader.class, "label", "INTEGER", true, 2, "The index of the group containing the label string.\n Use 0 to indicate that this field is not used.", null); static CommandOption.Integer dataGroup = new CommandOption.Integer (BulkLoader.class, "data", "INTEGER", true, 3, "The index of the group containing the data.", null); static CommandOption.Integer pruneCount = new CommandOption.Integer (BulkLoader.class, "prune-count", "N", false, 0, "Reduce features to those that occur more than N times.", null); /** * Read the data from inputFile, then write all the words * that do not occur <tt>pruneCount.value</tt> times or more to the pruned word file. * * @param prunedTokenizer the tokenizer that will be used to write instances */ public static void generateStoplist(SimpleTokenizer prunedTokenizer) throws IOException { CsvIterator reader = new CsvIterator(new FileReader(inputFile.value), lineRegex.value, dataGroup.value, labelGroup.value, nameGroup.value); ArrayList<Pipe> pipes = new ArrayList<Pipe>(); Alphabet alphabet = new Alphabet(); CharSequenceLowercase csl = new CharSequenceLowercase(); SimpleTokenizer st = prunedTokenizer.deepClone(); StringList2FeatureSequence sl2fs = new StringList2FeatureSequence(alphabet); FeatureCountPipe featureCounter = new FeatureCountPipe(alphabet, null); if (! preserveCase.value) { pipes.add(csl); } pipes.add(st); pipes.add(sl2fs); pipes.add(featureCounter); Pipe serialPipe = new SerialPipes(pipes); Iterator<Instance> iterator = serialPipe.newIteratorFrom(reader); int count = 0; // We aren't really interested in the instance itself, // just the total feature counts. while (iterator.hasNext()) { count++; if (count % 100000 == 0) { System.out.println(count); } iterator.next(); } featureCounter.addPrunedWordsToStoplist(prunedTokenizer, pruneCount.value); } public static void writeInstanceList(SimpleTokenizer prunedTokenizer) throws IOException { CsvIterator reader = new CsvIterator(new FileReader(inputFile.value), lineRegex.value, dataGroup.value, labelGroup.value, nameGroup.value); ArrayList<Pipe> pipes = new ArrayList<Pipe>(); Alphabet alphabet = new Alphabet(); CharSequenceLowercase csl = new CharSequenceLowercase(); StringList2FeatureSequence sl2fs = new StringList2FeatureSequence(alphabet); if (! preserveCase.value) { pipes.add(csl); } pipes.add(prunedTokenizer); pipes.add(sl2fs); Pipe serialPipe = new SerialPipes(pipes); InstanceList instances = new InstanceList(serialPipe); instances.addThruPipe(reader); instances.save(outputFile.value); } public static void main (String[] args) throws IOException { // Process the command-line options CommandOption.setSummary (BulkLoader.class, "Efficient tool for importing large amounts of text into Mallet format"); CommandOption.process (BulkLoader.class, args); SimpleTokenizer tokenizer = null; if (stoplistFile.value != null) { tokenizer = new SimpleTokenizer(stoplistFile.value); } else if (removeStopWords.value) { tokenizer = new SimpleTokenizer(SimpleTokenizer.USE_DEFAULT_ENGLISH_STOPLIST); } else { tokenizer = new SimpleTokenizer(SimpleTokenizer.USE_EMPTY_STOPLIST); } if (pruneCount.value > 0) { generateStoplist(tokenizer); } writeInstanceList(tokenizer); } }
5,904
33.735294
176
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/Sequences.java
package cc.mallet.util; import cc.mallet.types.Sequence; /** Utility methods for cc.mallet.types.Sequence and similar classes. */ public class Sequences { public static double elementwiseAccuracy (Sequence truth, Sequence predicted) { int accuracy = 0; assert (truth.size() == predicted.size()); for (int i = 0; i < predicted.size(); i++) { //logger.fine("tokenAccuracy: ref: "+referenceOutput.get(i)+" viterbi: "+output.get(i)); if (truth.get(i).toString().equals (predicted.get(i).toString())) { accuracy++; } } return ((double)accuracy)/predicted.size(); } }
593
26
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/StatFunctions.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util; import java.util.logging.*; // Obtained from http://www.stat.vt.edu/~sundar/java/code/StatFunctions.html // August 2002 /** * @(#)StatFunctions.java * * DAMAGE (c) 2000 by Sundar Dorai-Raj * * @author Sundar Dorai-Raj * * Email: [email protected] * * This program is free software; you can redistribute it and/or * * modify it under the terms of the GNU General Public License * * as published by the Free Software Foundation; either version 2 * * of the License, or (at your option) any later version, * * provided that any use properly credits the author. * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details at http://www.gnu.org * * */ import java.io.*; import cc.mallet.util.MalletLogger; public final class StatFunctions { private static Logger logger = MalletLogger.getLogger(StatFunctions.class.getName()); public static double cov(Univariate x,Univariate y) { double sumxy=0; int i,n=(x.size()>=y.size() ? x.size():y.size()); try { for(i=0;i<x.size();i++) sumxy+=(x.elementAt(i)-x.mean())*(y.elementAt(i)-y.mean()); } catch (ArrayIndexOutOfBoundsException e) { logger.info ("size of x != size of y"); e.printStackTrace(); } return(sumxy/(n-1)); } public static double corr(Univariate x,Univariate y) { double cov=cov(x,y); return(cov/(x.stdev()*y.stdev())); } public static double[] ols(Univariate x,Univariate y) { double[] coef=new double[2]; int i,n=(x.size()<=y.size() ? x.size():y.size()); double sxy=0.0,sxx=0.0; double xbar=x.mean(),ybar=y.mean(),xi,yi; for(i=0;i<n;i++) { xi=x.elementAt(i); yi=y.elementAt(i); sxy+=(xi-xbar)*(yi-ybar); sxx+=(xi-xbar)*(xi-xbar); } coef[0]=sxy/sxx; coef[1]=ybar-coef[0]*xbar; return(coef); } public static double qnorm(double p,boolean upper) { /* Reference: J. D. Beasley and S. G. Springer Algorithm AS 111: "The Percentage Points of the Normal Distribution" Applied Statistics */ if(p<0 || p>1) throw new IllegalArgumentException("Illegal argument "+p+" for qnorm(p)."); double split=0.42, a0= 2.50662823884, a1=-18.61500062529, a2= 41.39119773534, a3=-25.44106049637, b1= -8.47351093090, b2= 23.08336743743, b3=-21.06224101826, b4= 3.13082909833, c0= -2.78718931138, c1= -2.29796479134, c2= 4.85014127135, c3= 2.32121276858, d1= 3.54388924762, d2= 1.63706781897, q=p-0.5; double r,ppnd; if(Math.abs(q)<=split) { r=q*q; ppnd=q*(((a3*r+a2)*r+a1)*r+a0)/((((b4*r+b3)*r+b2)*r+b1)*r+1); } else { r=p; if(q>0) r=1-p; if(r>0) { r=Math.sqrt(-Math.log(r)); ppnd=(((c3*r+c2)*r+c1)*r+c0)/((d2*r+d1)*r+1); if(q<0) ppnd=-ppnd; } else { ppnd=0; } } if(upper) ppnd=1-ppnd; return(ppnd); } public static double qnorm(double p,boolean upper,double mu,double sigma2) { return(qnorm(p,upper)*Math.sqrt(sigma2)+mu); } public static double pnorm(double z,boolean upper) { /* Reference: I. D. Hill Algorithm AS 66: "The Normal Integral" Applied Statistics */ double ltone=7.0, utzero=18.66, con=1.28, a1 = 0.398942280444, a2 = 0.399903438504, a3 = 5.75885480458, a4 =29.8213557808, a5 = 2.62433121679, a6 =48.6959930692, a7 = 5.92885724438, b1 = 0.398942280385, b2 = 3.8052e-8, b3 = 1.00000615302, b4 = 3.98064794e-4, b5 = 1.986153813664, b6 = 0.151679116635, b7 = 5.29330324926, b8 = 4.8385912808, b9 =15.1508972451, b10= 0.742380924027, b11=30.789933034, b12= 3.99019417011; double y,alnorm; if(z<0) { upper=!upper; z=-z; } if(z<=ltone || upper && z<=utzero) { y=0.5*z*z; if(z>con) { alnorm=b1*Math.exp(-y)/(z-b2+b3/(z+b4+b5/(z-b6+b7/(z+b8-b9/(z+b10+b11/(z+b12)))))); } else { alnorm=0.5-z*(a1-a2*y/(y+a3-a4/(y+a5+a6/(y+a7)))); } } else { alnorm=0; } if(!upper) alnorm=1-alnorm; return(alnorm); } public static double pnorm(double x,boolean upper,double mu,double sigma2) { return(pnorm((x-mu)/Math.sqrt(sigma2),upper)); } public static double qt(double p,double ndf,boolean lower_tail) { // Algorithm 396: Student's t-quantiles by // G.W. Hill CACM 13(10), 619-620, October 1970 if(p<=0 || p>=1 || ndf<1) throw new IllegalArgumentException("Invalid p or df in call to qt(double,double,boolean)."); double eps=1e-12; double M_PI_2=1.570796326794896619231321691640; // pi/2 boolean neg; double P,q,prob,a,b,c,d,y,x; if((lower_tail && p > 0.5) || (!lower_tail && p < 0.5)) { neg = false; P = 2 * (lower_tail ? (1 - p) : p); } else { neg = true; P = 2 * (lower_tail ? p : (1 - p)); } if(Math.abs(ndf - 2) < eps) { /* df ~= 2 */ q = Math.sqrt(2 / (P * (2 - P)) - 2); } else if (ndf < 1 + eps) { /* df ~= 1 */ prob = P * M_PI_2; q = Math.cos(prob)/Math.sin(prob); } else { /*-- usual case; including, e.g., df = 1.1 */ a = 1 / (ndf - 0.5); b = 48 / (a * a); c = ((20700 * a / b - 98) * a - 16) * a + 96.36; d = ((94.5 / (b + c) - 3) / b + 1) * Math.sqrt(a * M_PI_2) * ndf; y = Math.pow(d * P, 2 / ndf); if (y > 0.05 + a) { /* Asymptotic inverse expansion about normal */ x = qnorm(0.5 * P,false); y = x * x; if (ndf < 5) c += 0.3 * (ndf - 4.5) * (x + 0.6); c = (((0.05 * d * x - 5) * x - 7) * x - 2) * x + b + c; y = (((((0.4 * y + 6.3) * y + 36) * y + 94.5) / c - y - 3) / b + 1) * x; y = a * y * y; if (y > 0.002)/* FIXME: This cutoff is machine-precision dependent*/ y = Math.exp(y) - 1; else { /* Taylor of e^y -1 : */ y = (0.5 * y + 1) * y; } } else { y = ((1 / (((ndf + 6) / (ndf * y) - 0.089 * d - 0.822) * (ndf + 2) * 3) + 0.5 / (ndf + 4)) * y - 1) * (ndf + 1) / (ndf + 2) + 1 / y; } q = Math.sqrt(ndf * y); } if(neg) q = -q; return q; } public static double pt(double t,double df) { // ALGORITHM AS 3 APPL. STATIST. (1968) VOL.17, P.189 // Computes P(T<t) double a,b,idf,im2,ioe,s,c,ks,fk,k; double g1=0.3183098862;// =1/pi; if(df<1) throw new IllegalArgumentException("Illegal argument df for pt(t,df)."); idf=df; a=t/Math.sqrt(idf); b=idf/(idf+t*t); im2=df-2; ioe=idf%2; s=1; c=1; idf=1; ks=2+ioe; fk=ks; if(im2>=2) { for(k=ks;k<=im2;k+=2) { c=c*b*(fk-1)/fk; s+=c; if(s!=idf) { idf=s; fk+=2; } } } if(ioe!=1) return 0.5+0.5*a*Math.sqrt(b)*s; if(df==1) s=0; return 0.5+(a*b*s+Math.atan(a))*g1; } public double pchisq(double q,double df) { // Posten, H. (1989) American Statistician 43 p. 261-265 double df2=df*.5; double q2=q*.5; int n=5,k; double tk,CFL,CFU,prob; if(q<=0 || df<=0) throw new IllegalArgumentException("Illegal argument "+q+" or "+df+" for qnorm(p)."); if(q<df) { tk=q2*(1-n-df2)/(df2+2*n-1+n*q2/(df2+2*n)); for(k=n-1;k>1;k--) tk=q2*(1-k-df2)/(df2+2*k-1+k*q2/(df2+2*k+tk)); CFL=1-q2/(df2+1+q2/(df2+2+tk)); prob=Math.exp(df2*Math.log(q2)-q2-Maths.logGamma(df2+1)-Math.log(CFL)); } else { tk=(n-df2)/(q2+n); for(k=n-1;k>1;k--) tk=(k-df2)/(q2+k/(1+tk)); CFU=1+(1-df2)/(q2+1/(1+tk)); prob=1-Math.exp((df2-1)*Math.log(q2)-q2-Maths.logGamma(df2)-Math.log(CFU)); } return prob; } public static double betainv(double x,double p,double q) { // ALGORITHM AS 63 APPL. STATIST. VOL.32, NO.1 // Computes P(Beta>x) double beta=Maths.logBeta(p,q),acu=1E-14; double cx,psq,pp,qq,x2,term,ai,betain,ns,rx,temp; boolean indx; if(p<=0 || q<=0) return(-1.0); if(x<=0 || x>=1) return(-1.0); psq=p+q; cx=1-x; if(p<psq*x) { x2=cx; cx=x; pp=q; qq=p; indx=true; } else { x2=x; pp=p; qq=q; indx=false; } term=1; ai=1; betain=1; ns=qq+cx*psq; rx=x2/cx; temp=qq-ai; if(ns==0) rx=x2; while(temp>acu && temp>acu*betain) { term=term*temp*rx/(pp+ai); betain=betain+term; temp=Math.abs(term); if(temp>acu && temp>acu*betain) { ai++; ns--; if(ns>=0) { temp=qq-ai; if(ns==0) rx=x2; } else { temp=psq; psq+=1; } } } betain*=Math.exp(pp*Math.log(x2)+(qq-1)*Math.log(cx)-beta)/pp; if(indx) betain=1-betain; return(betain); } public static double pf(double x,double df1,double df2) { // ALGORITHM AS 63 APPL. STATIST. VOL.32, NO.1 // Computes P(F>x) return(betainv(df1*x/(df1*x+df2),0.5*df1,0.5*df2)); } }
10,124
27.68272
98
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/ColorUtils.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.util; import java.awt.*; import java.text.DecimalFormat; import cc.mallet.types.MatrixOps; /** * Utilities for dealing with RGB-style colors. * * Created: Mar 30, 2005 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: ColorUtils.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $ */ public class ColorUtils { /** * Returns a list of hex color names of length n. * Colors are generated by equally-spaced hues in HSB space. * @param n Number of "equally-spaced" colors to return * @param s Saturation of generated colors * @param b Brightness * @return An array of hex color names, e.g., "#0033FF" */ public static String[] rainbow (int n, float s, float b) { double[] vals = new double[n]; for (int i = 0; i < n; i++) vals[i] = i; MatrixOps.timesEquals (vals, 1.0/n); String[] ret = new String[n]; for (int i = 0; i < n; i++) { int rgb = Color.HSBtoRGB ((float) vals[i], s, b); Color color = new Color (rgb); ret[i] = colorToHexString (color); } return ret; } private static String colorToHexString (Color color) { int r = color.getRed (); int g = color.getGreen (); int b = color.getBlue (); StringBuffer ret = new StringBuffer (); ret.append ('#'); if (r < 16) ret.append (0); ret.append (Integer.toHexString(r).toUpperCase()); if (g < 16) ret.append (0); ret.append (Integer.toHexString(g).toUpperCase()); if (b < 16) ret.append (0); ret.append (Integer.toHexString(b).toUpperCase()); return ret.toString (); } }
2,037
28.970588
76
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/tests/TestRandom.java
/* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://mallet.cs.umass.edu/ This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.util.tests; import cc.mallet.types.MatrixOps; import cc.mallet.util.Randoms; import cc.mallet.util.Strings; import junit.framework.TestCase; import junit.framework.Test; import junit.framework.TestSuite; /** * Created: Jan 19, 2005 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: TestRandom.java,v 1.1 2007/10/22 21:37:57 mccallum Exp $ */ public class TestRandom extends TestCase { public TestRandom (String name) { super (name); } public static Test suite () { return new TestSuite (TestRandom.class); } public static void testAsJava () { Randoms mRand = new Randoms (); java.util.Random jRand = mRand.asJavaRandom (); int size = 10000; double[] vals = new double [size]; for (int i = 0; i < size; i++) { vals[i] = jRand.nextGaussian (); } assertEquals (0.0, MatrixOps.mean (vals), 0.01); assertEquals (1.0, MatrixOps.stddev (vals), 0.01); } public static void main (String[] args) throws Throwable { TestSuite theSuite; if (args.length > 0) { theSuite = new TestSuite (); for (int i = 0; i < args.length; i++) { theSuite.addTest (new TestRandom (args[i])); } } else { theSuite = (TestSuite) TestRandom.suite (); } junit.textui.TestRunner.run (theSuite); } }
1,764
26.153846
76
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/tests/TestPriorityQueue.java
package cc.mallet.util.tests; import cc.mallet.util.search.*; import junit.framework.*; /** * Created by IntelliJ IDEA. * User: pereira * Date: Jun 18, 2005 * Time: 11:19:36 PM * Test priority queues and their implementation. */ public class TestPriorityQueue extends TestCase { private static final int N = 100; private static class Item implements QueueElement { private int position; private double priority; private Item(double p) { priority = p; } public double getPriority() { return priority; }; public void setPriority(double p) { priority = p; } public int getPosition() { return position; } public void setPosition(int p) { position = p; } } public TestPriorityQueue(String name) { super(name); } public void testAscending() { PriorityQueue q = new MinHeap(N); double p[] = new double[N]; for (int i = 0; i < N; i++) { p[i] = i; Item e = new Item(i); q.insert(e); } int j = 0; double pr = Double.NEGATIVE_INFINITY; assertTrue("ascending size", q.size() == N); while (q.size() > 0) { assertTrue("ascending extract", j < N); QueueElement e = q.extractMin(); assertTrue("ascending order", e.getPriority() > pr); assertEquals("ascending priority", e.getPriority(), p[j++], 1e-5); pr = e.getPriority(); } } public void testDescending() { PriorityQueue q = new MinHeap(N); double p[] = new double[N]; for (int i = 0; i < N; i++) { p[i] = i; Item e = new Item(N-i-1); q.insert(e); } int j = 0; double pr = Double.NEGATIVE_INFINITY; assertTrue("descending size", q.size() == N); while (q.size() > 0) { assertTrue("descending extract", j < N); QueueElement e = q.extractMin(); assertTrue("descending order", e.getPriority() > pr); assertEquals("descending priority", e.getPriority(), p[j++], 1e-5); pr = e.getPriority(); } } public void testChangePriority () { PriorityQueue q = new MinHeap(N); Item items[] = new Item[N]; for (int i = 0; i < N; i++) { Item e = new Item(N-i-1); q.insert(e); items[i] = e; } q.changePriority (items[N-1], -2); q.changePriority (items[N/2], -1); q.changePriority (items[N/2 + 1], N*2); int j = 0; double pr_last = Double.NEGATIVE_INFINITY; assertTrue("descending size", q.size() == N); while (q.size() > 0) { assertTrue("descending extract", j < N); QueueElement e = q.extractMin(); assertTrue("descending order", e.getPriority() > pr_last); pr_last = e.getPriority(); if (j == 0) assertTrue ("lowest elt", e.getPriority () == -2); if (j == 1) assertTrue ("second-lowest elt", e.getPriority () == -1); if (q.size() == 1) assertTrue ("penultimate elt", e.getPriority () == N-1); if (q.size() == 0) assertTrue ("final elt", e.getPriority () == N*2); j++; } } public void testReverse () { PriorityQueue q = new MinHeap(N); Item items[] = new Item[N]; for (int i = 0; i < N; i++) { Item e = new Item(N-i-1); q.insert(e); items[i] = e; } for (int i = 0; i < N; i++) { q.changePriority (items[i], i); } int j = 0; double pr_last = Double.NEGATIVE_INFINITY; assertTrue("ascending size", q.size() == N); while (q.size() > 0) { assertTrue("ascending extract", j < N); QueueElement e = q.extractMin(); assertTrue("ascending order", e.getPriority() > pr_last); pr_last = e.getPriority(); assertEquals ("ascending priority", items[j].getPriority (), e.getPriority ()); assertEquals ("ascending identity", items[j], e); j++; } } public void testEqualKeys () { PriorityQueue q = new MinHeap (N); Item[] items = new Item[20]; int j = 0; for (int i = 0; i < 5; i++) { items[j] = new Item (5); q.insert (items[j]); j++; } for (int i = 0; i < 5; i++) { items[j] = new Item (3); q.insert (items[j]); j++; } for (int i = 0; i < 5; i++) { items[j] = new Item (4); q.insert (items[j]); j++; } for (int i = 0; i < 5; i++) { items[j] = new Item (7); q.insert (items[j]); j++; } assertEquals (20, q.size ()); for (int i = 0; i < items.length; i++) { assertTrue (q.contains (items[i])); } for (int i = 0; i < 5; i++) { QueueElement e = q.extractMin (); assertTrue (q.contains (q.min ())); assertEquals (3.0, e.getPriority ()); } for (int i = 0; i < 5; i++) { QueueElement e = q.extractMin (); assertTrue (q.contains (q.min ())); assertEquals (4.0, e.getPriority ()); } for (int i = 0; i < 5; i++) { QueueElement e = q.extractMin (); assertTrue (q.contains (q.min ())); assertEquals (5.0, e.getPriority ()); } for (int i = 0; i < 5; i++) { QueueElement e = q.extractMin (); if (q.size() > 0) assertTrue (q.contains (q.min ())); assertEquals (7.0, e.getPriority ()); } } public static Test suite() { return new TestSuite(TestPriorityQueue.class); } public static void main(String[] args) { junit.textui.TestRunner.run(suite()); } }
5,290
26.847368
85
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/tests/TestMaths.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.util.tests; import cc.mallet.types.MatrixOps; import cc.mallet.util.Maths; import junit.framework.*; /** * Created: Oct 31, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: TestMaths.java,v 1.1 2007/10/22 21:37:57 mccallum Exp $ */ public class TestMaths extends TestCase { public TestMaths (String name) { super (name); } public void testLogBinom () { assertEquals (-3.207352, Maths.logBinom (25, 50, 0.4), 1e-5); assertEquals (-230.2585, Maths.logBinom (0, 100, 0.9), 1e-5); } public void testPbinom () { assertEquals (0.9426562, Maths.pbinom (25, 50, 0.4), 1e-5); assertEquals (0.001978561, Maths.pbinom (80, 100, 0.9), 1e-5); } public void testSumLogProb () { double[] vals = { 53.0, 1.56e4, 0.0045, 672.563, 1e-15 }; double[] logVals = new double [vals.length]; for (int i = 0; i < vals.length; i++) logVals [i] = Math.log (vals[i]); double sum = MatrixOps.sum (vals); double lsum2 = Double.NEGATIVE_INFINITY; for (int i = 0; i < logVals.length; i++) { lsum2 = Maths.sumLogProb (lsum2, logVals [i]); } assertEquals (sum, Math.exp(lsum2), 1e-5); double lsum = Maths.sumLogProb (logVals); assertEquals (sum, Math.exp (lsum), 1e-5); } public void testSubtractLogProb () { double a = 0.9; double b = 0.25; assertEquals (Math.log (a - b), Maths.subtractLogProb (Math.log (a), Math.log (b)), 1e-5); assertTrue (Double.isNaN (Maths.subtractLogProb (Math.log (b), Math.log (a)))); } public static Test suite () { return new TestSuite (TestMaths.class); } public static void main (String[] args) throws Throwable { TestSuite theSuite; if (args.length > 0) { theSuite = new TestSuite (); for (int i = 0; i < args.length; i++) { theSuite.addTest (new TestMaths (args[i])); } } else { theSuite = (TestSuite) suite (); } junit.textui.TestRunner.run (theSuite); } }
2,450
25.934066
94
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/tests/TestPropertyList.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.util.tests; import cc.mallet.util.PropertyList; import junit.framework.*; public class TestPropertyList extends TestCase { public TestPropertyList (String name) { super (name); } public void testOne () { PropertyList pl = null; pl = PropertyList.add ("one", 1.0, pl); pl = PropertyList.add ("two", 2.0, pl); pl = PropertyList.add ("three", 3, pl); assertTrue (pl.lookupNumber("one") == 1.0); pl = PropertyList.remove ("three", pl); assertTrue (pl.lookupNumber("three") == 0.0); pl = PropertyList.add ("color", "red", pl); assertTrue (pl.lookupObject("color").equals("red")); } public static Test suite () { return new TestSuite (TestPropertyList.class); } protected void setUp () { } public static void main (String[] args) { junit.textui.TestRunner.run (suite()); } }
1,361
23.321429
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/tests/TestStrings.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.util.tests; import cc.mallet.util.Strings; import junit.framework.*; /** * Created: Jan 19, 2005 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: TestStrings.java,v 1.1 2007/10/22 21:37:57 mccallum Exp $ */ public class TestStrings extends TestCase { public TestStrings (String name) { super (name); } public static Test suite () { return new TestSuite (TestStrings.class); } public static void testCount () { assertEquals (5, Strings.count ("abracadabra", 'a')); assertEquals (0, Strings.count ("hocus pocus", 'z')); } public static void main (String[] args) throws Throwable { TestSuite theSuite; if (args.length > 0) { theSuite = new TestSuite (); for (int i = 0; i < args.length; i++) { theSuite.addTest (new TestStrings (args[i])); } } else { theSuite = (TestSuite) suite (); } junit.textui.TestRunner.run (theSuite); } }
1,410
26.134615
76
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/tests/TestAStar.java
package cc.mallet.util.tests; import cc.mallet.util.search.*; import junit.framework.*; /** * Created by IntelliJ IDEA. * User: pereira * Date: Jun 19, 2005 * Time: 2:36:10 PM * Test A* search. */ public class TestAStar extends TestCase { private class State implements AStarState { private double to; private State next[]; private double cost[]; private int id; private boolean sink; private State(int id, int numEdges, double to, boolean sink) { this.id = id; next = new State[numEdges]; cost = new double[numEdges]; this.to = to; this.sink = sink; } public boolean isFinal() { return sink; } public double completionCost() { return to; } private class NextStates extends SearchState.NextStateIterator { private int i; private NextStates() { i = 0; } public boolean hasNext() { return i < next.length; } public SearchState nextState() { return next[i++]; } public double cost() { return cost[i-1]; } } public SearchState.NextStateIterator getNextStates() { return new NextStates(); } public String toString() { return "node " + id; } } public TestAStar(String name) { super(name); } public void testSmall() { State node5 = new State(5, 0, 0, true); State node6 = new State(6, 0, 0, true); State node2 = new State(2, 1, 6, false); node2.next[0] = node5; node2.cost[0] = 6; State node3 = new State(3, 2, 2, false); node3.next[0] = node5; node3.cost[0] = 4; node3.next[1] = node6; node3.cost[1] = 2; State node4 = new State(4, 1, 6, false); node4.next[0] = node6; node4.cost[0] = 6; State node0 = new State(0, 2, 4, false); node0.next[0] = node2; node0.cost[0] = 2; node0.next[1] = node3; node0.cost[1] = 2; State node1 = new State(1, 2, 3, false); node1.next[0] = node3; node1.cost[0] = 1; node1.next[1] = node4; node1.cost[1] = 1; State[][] paths = new State[6][]; double[] costs = new double[6]; paths[0] = new State[] { node6, node3, node1 }; costs[0] = 3; paths[1] = new State[] { node6, node3, node0 }; costs[1] = 4; paths[2] = new State[] { node5, node3, node1 }; costs[2] = 5; paths[3] = new State[] { node5, node3, node0 }; costs[3] = 6; paths[4] = new State[] { node6, node4, node1 }; costs[4] = 7; paths[5] = new State[] { node5, node2, node0 }; costs[5] = 8; AStar s = new AStar(new State[] {node0, node1}, 7); int i = 0; while (s.hasNext()) { assertTrue("number of answers > " + i, i < 6); SearchNode n = s.nextAnswer(); assertEquals("costs[" + i + "] != " + n.getPriority(), costs[i], n.getPriority(), 1e-5); int j = 0; while (n != null) { assertTrue("path length > " + j, j < 3); assertTrue("path[" + i + "][" + j + "] != " + n, paths[i][j] == n.getState()); j++; n = (SearchNode)n.getParent(); } assertTrue("path length != " + j, j == 3); i++; } assertTrue("number of answers != " + i, i == 6); } public static Test suite() { return new TestSuite(TestAStar.class); } public static void main(String[] args) { junit.textui.TestRunner.run(suite()); } }
3,287
30.92233
70
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/search/AStarNode.java
package cc.mallet.util.search; /** * Created by IntelliJ IDEA. * User: pereira * Date: Jun 19, 2005 * Time: 1:07:17 PM * Search node in an A* search. */ public class AStarNode extends SearchNode { /** * Iterator over new A* search nodes generated by state transitions * from this node's state. */ public class NextNodeIterator extends SearchNode.NextNodeIterator { protected NextNodeIterator() { super(); } public SearchNode nextNode() { AStarNode p = AStarNode.this; AStarState s = (AStarState)getStateIter().nextState(); return new AStarNode(s, p, p.getCost() + cost()); } } /** * Create an A* search node with given state, parent, and cost. * @param state the state * @param parent the parent * @param cost the cost */ public AStarNode(AStarState state, AStarNode parent, double cost) { super(state, parent, cost); } /** * Get the completion cost for the underlying state. * @return the completion cost */ public double completionCost() { return ((AStarState)getState()).completionCost(); } public SearchNode.NextNodeIterator getNextNodes() { return new NextNodeIterator(); } }
1,194
25.555556
69
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/search/SearchState.java
package cc.mallet.util.search; import java.util.Iterator; /** * Created by IntelliJ IDEA. * User: pereira * Date: Jun 20, 2005 * Time: 4:54:46 PM * A state (vertex) in a graph being searched. */ public interface SearchState { /** * Iterator over the states with transitions from a given state. */ public static abstract class NextStateIterator implements Iterator<SearchState> { public abstract boolean hasNext(); public SearchState next() {return nextState(); }; /** * Get the next reachable state. * @return the state */ public abstract SearchState nextState(); /** * The cost of the transition to the current state. * @return transition cost */ public abstract double cost(); public void remove() { throw new UnsupportedOperationException(); } } /** * Get an iterator over the states with transitions from * this state. * @return the iterator */ public abstract NextStateIterator getNextStates(); /** * Is this state final? * @return whether this state is final */ public abstract boolean isFinal(); }
1,122
23.955556
83
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/search/SearchNode.java
package cc.mallet.util.search; import java.util.Iterator; /** * Created by IntelliJ IDEA. * User: pereira * Date: Jun 20, 2005 * Time: 4:46:56 PM * * Search tree node. A search tree node pertains to some search graph state. * Multiple nodes may refer to the same state, representing different ways * of reaching the state. Search nodes have a priority, which determines when * they will be expanded, and cost of reaching the node from the start of the * search. */ public class SearchNode implements QueueElement { private int position = -1; private double priority = Double.POSITIVE_INFINITY; private double cost; private SearchNode parent; private SearchState state; /** * This iterator generates search nodes that refer to the * states reachable from the state pertaining to a this search node. */ public class NextNodeIterator implements Iterator { private SearchState.NextStateIterator stateIter; protected NextNodeIterator() { stateIter = state.getNextStates(); } public boolean hasNext() { return stateIter.hasNext(); } public Object next() { return nextNode(); }; /** * The search tree node for the next state reached from * the current state. * @return a new search tree node */ public SearchNode nextNode() { SearchNode p = SearchNode.this; SearchState s = stateIter.nextState(); return new SearchNode(s, p, p.getCost() + cost()); } /** * The cost associated to the transition from the previous * state to this state. * @return the cost */ public double cost() { return stateIter.cost(); } public void remove() { throw new UnsupportedOperationException(); } protected SearchState.NextStateIterator getStateIter() { return stateIter; } } /** * Create a search node with given state, parent, and cost. * @param state the state * @param parent the parent * @param cost the cost */ public SearchNode(SearchState state, SearchNode parent, double cost) { this.state = state; this.parent = parent; this.cost = cost; } public double getPriority() { return priority; } public void setPriority(double priority) { this.priority = priority; } public int getPosition() { return position; } public void setPosition(int position) { this.position = position; } /** * The node that generated this node. * @return the parent */ public SearchNode getParent() { return parent; } /** * Get the cost for this node. * @return the cost */ public double getCost() { return cost; } /** * The state for this search node. * @return the state */ public SearchState getState() { return state; } /** * Is the node's state final? * @return whether this state's node is final */ public boolean isFinal() { return state.isFinal(); } /** * Get an iterator over the new search nodes reachable * from this node by state transitions. * @return the iterator */ public NextNodeIterator getNextNodes() { return new NextNodeIterator(); } public String toString() { return state.toString() + "/" + priority; } }
3,173
29.228571
77
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/search/AStarState.java
package cc.mallet.util.search; /** * Created by IntelliJ IDEA. * User: pereira * Date: Jun 20, 2005 * Time: 5:16:05 PM * Search state with heuristic cost-to-completion. */ public interface AStarState extends SearchState { /** * Get the cost to completion. * @return the cost */ public abstract double completionCost(); }
341
19.117647
50
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/search/MinHeap.java
package cc.mallet.util.search; /** * Created by IntelliJ IDEA. * User: pereira * Date: Jun 18, 2005 * Time: 9:11:24 PM * <p/> * Binary heap implementation of <code>PriorityQueue</code>. * Based on algorithm in Corman, Leiserson, Rivest, and Stein (Section 6.5). */ public class MinHeap implements PriorityQueue { private QueueElement[] elts; private int size = 0; private static final int MIN_CAPACITY = 16; /** * Create a binary heap with initial capacity <code>capacity</code>. * The heap's capacity grows as needed to accomodate insertions. * * @param capacity initial capacity */ public MinHeap (int capacity) { if (capacity < MIN_CAPACITY) capacity = MIN_CAPACITY; elts = new QueueElement[capacity]; size = 0; } /** * Create a binary heap with minimum initial capacity. */ public MinHeap () { this (MIN_CAPACITY); } private void heapify (int i) { int l = 2 * i + 1; int r = 2 * i + 2; int first; if (l < size && elts[l].getPriority () < elts[i].getPriority ()) first = l; else first = i; if (r < size && elts[r].getPriority () < elts[first].getPriority ()) first = r; if (first != i) { QueueElement e = elts[i]; elts[i] = elts[first]; elts[i].setPosition (i); elts[first] = e; e.setPosition (first); heapify (first); } } public int size () { return size; } public QueueElement min () { if (size == 0) throw new IndexOutOfBoundsException ("queue empty"); return elts[0]; } public QueueElement extractMin () { if (size == 0) throw new IndexOutOfBoundsException ("queue empty"); QueueElement min = elts[0]; elts[0] = elts[--size]; elts[0].setPosition (0); // this is necessary in case elts[size-1] happens to be the best --cas heapify (0); min.setPosition (-1); return min; } public void changePriority (QueueElement e, double priority) { if (!contains (e)) throw new IllegalArgumentException ("Element not in queue"); if (priority <= e.getPriority ()) { decreaseKey (e, priority); } else { increaseKey (e, priority); } } private void increaseKey (QueueElement e, double priority) { e.setPriority (priority); heapify (e.getPosition ()); } private void decreaseKey (QueueElement e, double priority) { e.setPriority (priority); int i = e.getPosition (); int j; while (i > 0 && elts[j = (i - 1) / 2].getPriority () > elts[i].getPriority ()) { QueueElement p = elts[j]; elts[j] = elts[i]; elts[j].setPosition (j); elts[i] = p; p.setPosition (i); i = j; } } public void insert (QueueElement e) { if (size == elts.length) { QueueElement[] newElts = new QueueElement[size + size / 2]; for (int i = 0; i < size; i++) newElts[i] = elts[i]; elts = newElts; } e.setPosition (size); elts[size++] = e; changePriority (e, e.getPriority ()); } public boolean contains (QueueElement e) { int pos = e.getPosition (); return pos >= 0 && pos < size && e == elts[pos]; } public QueueElement[] toArray () { QueueElement[] arr = new QueueElement[size ()]; System.arraycopy (elts, 0, arr, 0, size ()); return arr; } // for debugging --cas private void checkHeap (int i) { int child = 2*i + 1; if (child < size) { assert elts[i].getPriority () <= elts [child].getPriority (); checkHeap (child); } child = 2*i + 2; if (child < size) { assert elts[i].getPriority () <= elts [child].getPriority (); checkHeap (child); } } }
3,708
22.18125
100
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/search/PriorityQueue.java
package cc.mallet.util.search; /** * Created by IntelliJ IDEA. * User: pereira * Date: Jun 18, 2005 * Time: 7:46:46 PM * * Interface representing the basic methods for a priority queue. */ public interface PriorityQueue { /** * Insert element <code>e</code> into the queue. * @param e the element to insert */ public void insert(QueueElement e); /** * The current size of the queue. * @return current size */ public int size(); /** * Return the top element of the queue. * @return top element of the queue */ public QueueElement min(); /** * Remove the top element of the queue. * @return the element removed */ public QueueElement extractMin(); /** * Change the priority of queue element <code>e</code> to <code>priority</code>. * The element's position in the queue is adjusted as needed. * @param e the element that has been changed * @param priority the new priority */ public void changePriority (QueueElement e, double priority); /** * Does the queue contain an element? * @param e the element * @return whether the queue contains the element */ public boolean contains(QueueElement e); /** Returns any array containing all of the elements in the queue. * They are not guaranteed to be in any particular order. */ public QueueElement[] toArray (); }
1,370
23.052632
82
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/search/AStar.java
package cc.mallet.util.search; import java.util.Iterator; import java.util.logging.Logger; import cc.mallet.util.MalletLogger; /** * Created by IntelliJ IDEA. * User: pereira * Date: Jun 19, 2005 * Time: 1:38:28 PM * A* search iterator over an underlying graph. The iterator returns * search nodes for final states in order of increasing cost to reach them * from the initial states, assuming that the heuristic cost-to-completion * function is admissible. This very simple version * assumes that we may revisit already visited states, because * we want to generate all paths to final states in order of * increasing cost. */ public class AStar implements Iterator<AStarNode> { private static Logger logger = MalletLogger.getLogger(AStar.class.getName()); private PriorityQueue q; private AStarNode answer; private boolean needNext; /** * Create an A* search iterator starting from the given initial states. * The expected size parameter gives the size of the search queue. If this * is too small, growing the queue costs more time. If this is too big, * space is wasted. * * @param initial the set of initial states * @param expectedSize the expected size of the search queue */ public AStar(AStarState[] initial, int expectedSize) { q = new MinHeap(expectedSize); for (int i = 0; i < initial.length; i++) { AStarState s = initial[i]; AStarNode n = new AStarNode(s, null, 0); n.setPriority(s.completionCost()); q.insert(n); } needNext = true; } private void lookAhead() { if (needNext) { answer = search(); needNext = false; } } public boolean hasNext() { lookAhead(); return answer != null; } public AStarNode next() { return nextAnswer(); } /** * Get the next search node for a final state. * @return a final search node */ public AStarNode nextAnswer() { lookAhead(); needNext = true; return answer; } public void remove() { throw new UnsupportedOperationException(); } private AStarNode search() { while (q.size() > 0) { AStarNode u = (AStarNode)q.extractMin(); //logger.info(u + ": " + u.getPriority()); if (u.isFinal()) { //logger.info("Final " + u); return u; } SearchNode.NextNodeIterator i = u.getNextNodes(); while (i.hasNext()) { AStarNode v = (AStarNode)i.nextNode(); double priority = v.getCost() + v.completionCost(); //logger.info("insert " + v + " at " + priority); v.setPriority(priority); q.insert(v); } } return null; } }
2,623
28.483146
79
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/search/QueueElement.java
package cc.mallet.util.search; /** * Created by IntelliJ IDEA. * User: pereira * Date: Jun 18, 2005 * Time: 7:31:08 PM * * Queue elements have a priority, and a queue position. * Lower-priority elements are closer to the front of the queue. * The queue position is set by the queue implementation, * and should not be changed outside the queue implementation. */ public interface QueueElement { /** * Get this element's priority. * @return the priority */ public double getPriority(); /** * Set the priority of this element. * @param priority the element's new priority */ public void setPriority(double priority); /** * Get the queue position of this element. If the element is not in a queue, * the returned value is meaningless. * @return the current position */ public int getPosition(); /** * Set the current queue position for this element. This should only * be called by a queue implementation. * @param pos the new position for the element */ public void setPosition(int pos); }
1,057
26.842105
78
java