repo
stringlengths
1
191
file
stringlengths
23
351
code
stringlengths
0
5.32M
file_length
int64
0
5.32M
avg_line_length
float64
0
2.9k
max_line_length
int64
0
288k
extension_type
stringclasses
1 value
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/ConstantFactor.java
/* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.types; import cc.mallet.util.Maths; import cc.mallet.util.Randoms; /** * $Id: ConstantFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class ConstantFactor extends AbstractFactor { private double c; public ConstantFactor (double c) { super (new HashVarSet ()); this.c = c; } protected Factor extractMaxInternal (VarSet varSet) { return this; } protected double lookupValueInternal (int i) { return c; } protected Factor marginalizeInternal (VarSet varsToKeep) { return this; } public double value (AssignmentIterator it) { return c; } // I can't imagine why anyone whould want to call this method. 
public Factor normalize () { c = 1.0; return this; } public Assignment sample (Randoms r) { return new Assignment (); } public String dumpToString () { return "[ConstantFactor : "+c+" ]"; } public String toString () { return dumpToString (); } public Factor slice (Assignment assn) { return this; } public Factor duplicate () { return new ConstantFactor (c); } public boolean almostEquals (Factor p, double epsilon) { return (p instanceof ConstantFactor && Maths.almostEquals (c, ((ConstantFactor)p).c, epsilon)); } public boolean isNaN () { return Double.isNaN (c); } public Factor multiply (Factor other) { // special handling of identity factor if (Maths.almostEquals (c, 1.0)) { return other.duplicate (); } else if (other instanceof ConstantFactor) { return new ConstantFactor (c * ((ConstantFactor)other).c); } else { return other.multiply (this); } } public void multiplyBy (Factor other) { if (!(other instanceof ConstantFactor)) { throw new UnsupportedOperationException ("Can't multiply a constant factor by "+other); } else { ConstantFactor otherCnst = (ConstantFactor) other; c *= otherCnst.c; } } public static Factor makeIdentityFactor () { return new ConstantFactor (1.0); } // Serialization garbage private static final long serialVersionUID = -2934945791792969816L; }
2,596
21.008475
99
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/Variable.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;

import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.Serializable;

import cc.mallet.types.LabelAlphabet;
import cc.mallet.util.PropertyList;

/**
 * Class for a discrete random variable in a graphical model.
 * A variable is either discrete (backed by a LabelAlphabet of outcomes)
 * or continuous (outcomes == null, getNumOutcomes() == CONTINUOUS).
 * Every variable registers itself with a Universe on construction.
 *
 * Created: Thu Sep 18 09:32:25 2003
 *
 * @author <a href="mailto:[email protected]">Charles Sutton</a>
 * @version $Id: Variable.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public class Variable implements Comparable, Serializable {

  private String label; // name of this variable

  private LabelAlphabet outcomes; // null iff this variable is continuous

  /** Number of outcomes for a continuous variable. */
  public static final int CONTINUOUS = -1;

  // Source of the auto-generated "VAR<n>" labels assigned by setName().
  // NOTE(review): static, unsynchronized counter — concurrent construction
  // could yield duplicate labels; confirm callers are single-threaded.
  private static int counter = 0;

  private Universe universe;
  private int index; // position assigned by universe.add(this)

  /**
   * Creates a new variable with the given outcomes.
   */
  public Variable (LabelAlphabet outs)
  {
    this (Universe.DEFAULT, outs);
  }

  /**
   * Creates a discrete variable in the given universe with the given outcomes.
   * @throws IllegalArgumentException if the alphabet has no outcomes.
   */
  public Variable (Universe universe, LabelAlphabet outs)
  {
    this.universe = universe;
    this.outcomes = outs;
    if (outs.size() < 1) {
      throw new IllegalArgumentException ("Attempt to create variable with "+outs.size()+" outcomes.");
    }
    setName ();
    index = universe.add (this);
  }

  /** Creates a variable with numOutcomes outcomes in the default universe. */
  public Variable (int numOutcomes)
  {
    this (Universe.DEFAULT, numOutcomes);
  }

  /**
   * Creates a variable with numOutcomes outcomes in the given universe.
   * A non-positive numOutcomes leaves outcomes null, i.e. a continuous variable.
   */
  public Variable (Universe universe, int numOutcomes)
  {
    this.universe = universe;
    if (numOutcomes > 0) outcomes = createBlankAlphabet (numOutcomes);
    setName ();
    index = universe.add (this);
  }

  // Builds an alphabet whose entries are the Integers 0..numOutcomes-1;
  // returns null for a non-positive count (continuous variable).
  private static LabelAlphabet createBlankAlphabet (int numOutcomes)
  {
    if (numOutcomes > 0) {
      LabelAlphabet outcomes = new LabelAlphabet ();
      /* Setup default outcomes */
      for (int i = 0; i < numOutcomes; i++) {
        // NOTE(review): `new Integer(i)` is deprecated in modern Java;
        // left as-is because the file targets an older language level.
        outcomes.lookupIndex (new Integer (i));
      }
      return outcomes;
    } else return null;
  }

  // Assigns the next auto-generated label, e.g. "VAR17".
  private void setName () { setLabel ("VAR" + (counter++)); }

  public String getLabel () { return label; }

  public void setLabel (String label) { this.label = label; }

  /** Returns the number of outcomes, or CONTINUOUS (-1) for a continuous variable. */
  public int getNumOutcomes ()
  {
    if (outcomes == null) { // we're continuous
      return CONTINUOUS;
    } else {
      return outcomes.size();
    }
  }

  // NOTE(review): NPEs on a continuous variable (outcomes == null).
  public Object lookupOutcome (int i)
  {
    return outcomes.lookupObject (i);
  }

  public LabelAlphabet getLabelAlphabet ()
  {
    return outcomes;
  }

  /** Orders variables by their universe index (not by label). */
  public int compareTo(Object o)
  {
    /*
    Variable var = (Variable) o;
    return getLabel().compareTo (var.getLabel());
    */
    int index = this.index;
    int index2 = ((Variable)o).index;
    if (index == index2) {
      return 0;
    } else if (index < index2) {
      return -1;
    } else {
      return 1;
    }
    /**/
  }

  // Lazily-created bag of named numeric annotations; not serialized.
  transient private PropertyList properties = null;

  public void setNumericProperty (String key, double value)
  {
    properties = PropertyList.add (key, value, properties);
  }

  // NOTE(review): throws NullPointerException if no property has ever been
  // set (properties is still null) — confirm whether callers rely on that.
  public double getNumericProperty (String key)
  {
    return properties.lookupNumber (key);
  }

  public String toString ()
  {
    return label;
  }

  /** Returns the index of this variable in its universe */
  public int getIndex () { return index; }

  public Universe getUniverse ()
  {
    return universe;
  }

  // Serialization garbage

  private static final long serialVersionUID = 1;
  private static final int CURRENT_SERIAL_VERSION = 1;

  private void writeObject (ObjectOutputStream out) throws IOException
  {
    out.defaultWriteObject ();
    out.writeInt (CURRENT_SERIAL_VERSION);
  }

  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
  {
    in.defaultReadObject ();
    in.readInt (); // version number; currently ignored
  }

  /** Returns true iff this variable is continuous (has no outcome alphabet). */
  public boolean isContinuous ()
  {
    return outcomes == null;
  }
}
4,233
21.052083
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/ParameterizedFactor.java
package cc.mallet.grmm.types;

/**
 * A factor that supports taking derivatives with respect to its continuous variables.
 * For example, a Gaussian factor can support derivatives with respect to its mean and precision.
 *
 * $Id: ParameterizedFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public interface ParameterizedFactor extends Factor {

  /**
   * Computes the expected derivative of the log factor value.  That is,
   * <pre>sum_{y} q(y) dlog f(y) / d theta</pre>,
   * where y are the outcomes of the discrete variables in the factor,
   * f(y) is the factor value, and theta is the vector of continuous variables
   * in the factor.  q is a user-specified distribution to take the expectation
   * with respect to.
   * <p>
   * The factor q specifies which variables to sum over.  The summation will be over
   * all the variables in <tt>q.varSet()</tt>, and the rest of the variables will be used
   * (presumably fixed to their values in <tt>assn</tt> — TODO confirm against implementations).
   * <p>
   * @param q Distribution to take with respect to (need not be normalized).
   *    <tt>q.varSet()</tt> should be all of the variables of this factor, except for one continuous variable
   * @param param Parameter to take gradient with respect to.
   * @param assn Assignment supplying values for the variables not summed over.
   * @return The expected gradient
   */
  public double sumGradLog (Factor q, Variable param, Assignment assn);

}
1,309
41.258065
109
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/UndirectedGrid.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.types; /** * A grid-shaped undirected graphical model. All this adds to the * base UndirectedModel class is the ability to retrieve variables * by their (x,y) position. * * Created: Mar 28, 2005 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: UndirectedGrid.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class UndirectedGrid extends UndirectedModel { private Variable[][] vars; private int width; private int height; /** * Creates an undirected grid and its associated Variable objects. * @param width The max x coordinate of the grid. * @param height The max y coordinate of thee grid. * @param numOutcomes The number of outcomes of each created variable. */ public UndirectedGrid (int width, int height, int numOutcomes) { super (width * height); this.width = width; this.height = height; addVariables (numOutcomes); // addEdges (); } public int getWidth () { return width; } public int getHeight () { return height; } /* xxx Is this necessary any more? 
private void addEdges () { // add up-down edges for (int x = 0; x < width; x++) { for (int y = 0; y < height - 1; y++) { Variable v1 = vars[x][y]; Variable v2 = vars[x][y+1]; addEdge (v1, v2); } } // add left-right edges for (int x = 0; x < width - 1; x++) { for (int y = 0; y < height; y++) { Variable v1 = vars[x][y]; Variable v2 = vars[x+1][y]; addEdge (v1, v2); } } } */ private void addVariables (int numOutcomes) { vars = new Variable [width][height]; for (int x = 0; x < width; x++) { for (int y = 0; y < height; y++) { vars[x][y] = new Variable (numOutcomes); } } } public Variable get (int x, int y) { return vars[x][y]; } }
2,327
25.454545
77
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/Factors.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;

import java.util.*;

import cc.mallet.grmm.inference.Inferencer;
import cc.mallet.grmm.util.Flops;
import cc.mallet.types.*;
import cc.mallet.util.*;
import gnu.trove.TIntArrayList;
import gnu.trove.TDoubleArrayList;

/**
 * A static utility class containing utility methods for dealing with factors,
 * especially TableFactor objects.
 *
 * Created: Mar 17, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: Factors.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public class Factors {

  /**
   * Normalizes the table in-place so that for each assignment to the parents
   * (all variables except var), the values over var sum to 1; then wraps the
   * table as a CPT with var as the child.  Works in log space via sumLogProb.
   */
  public static CPT normalizeAsCpt (AbstractTableFactor ptl, Variable var)
  {
    double[] sums = new double [ptl.numLocations ()];
    Arrays.fill (sums, Double.NEGATIVE_INFINITY);

    // Compute normalization factor for each neighbor assignment
    VarSet neighbors = new HashVarSet (ptl.varSet ());
    neighbors.remove (var);

    for (AssignmentIterator it = ptl.assignmentIterator (); it.hasNext (); it.advance ()) {
      Assignment assn = it.assignment ();
      Assignment nbrAssn = (Assignment) assn.marginalizeOut (var);
      int idx = nbrAssn.singleIndex ();
//      sums[idx] += ptl.phi (assn);
      sums[idx] = Maths.sumLogProb (ptl.logValue (assn), sums[idx]);
    }

    // ...and then normalize potential
    for (AssignmentIterator it = ptl.assignmentIterator (); it.hasNext (); it.advance ()) {
      Assignment assn = it.assignment ();
      double oldVal = ptl.logValue (assn);
//      double oldVal = ptl.phi (assn);
      Assignment nbrAssn = (Assignment) assn.marginalizeOut (var);
      double logZ = sums[nbrAssn.singleIndex ()];
//      ptl.setPhi (assn, oldVal / logZ);
      if (Double.isInfinite (oldVal) && Double.isInfinite (logZ)) {
        // 0/0 = 0
        ptl.setLogValue (assn, Double.NEGATIVE_INFINITY);
      } else {
        ptl.setLogValue (assn, oldVal - logZ);
      }
    }

    return new CPT (ptl, var);
  }

  /** Returns a mixture of two table factors, weighted by <tt>weight</tt>. */
  public static Factor average (Factor ptl1, Factor ptl2, double weight)
  {
    // complete hack
    TableFactor mptl1 = (TableFactor) ptl1;
    TableFactor mptl2 = (TableFactor) ptl2;
    return TableFactor.hackyMixture (mptl1, mptl2, weight);
  }

  /**
   * Returns the L1 distance between two factors over the same variables.
   * @throws IllegalArgumentException if the variable sets differ.
   */
  public static double oneDistance (Factor bel1, Factor bel2)
  {
    Set vs1 = bel1.varSet ();
    Set vs2 = bel2.varSet ();

    if (!vs1.equals (vs2)) {
      // NOTE(review): message is missing a space ("distancebetween").
      throw new IllegalArgumentException ("Attempt to take distancebetween mismatching potentials "+bel1+" and "+bel2);
    }

    double dist = 0;
    for (AssignmentIterator it = bel1.assignmentIterator (); it.hasNext ();) {
      Assignment assn = it.assignment ();
      dist += Math.abs (bel1.value (assn) - bel2.value (assn));
      it.advance ();
    }

    return dist;
  }

  /**
   * Returns a sparse factor keeping only the highest-valued entries of ptl,
   * greedily accumulated (in log space) until their mass exceeds alpha.
   */
  public static TableFactor retainMass (DiscreteFactor ptl, double alpha)
  {
    int[] idxs = new int [ptl.numLocations ()];
    double[] vals = new double [ptl.numLocations ()];
    for (int i = 0; i < idxs.length; i++) {
      idxs[i] = ptl.indexAtLocation (i);
      vals[i] = ptl.logValue (i);
    }

    // RankedFeatureVector sorts entries by value so we can walk them best-first.
    RankedFeatureVector rfv = new RankedFeatureVector (new Alphabet(), idxs, vals);
    TIntArrayList idxList = new TIntArrayList ();
    TDoubleArrayList valList = new TDoubleArrayList ();

    double mass = Double.NEGATIVE_INFINITY;
    double logAlpha = Math.log (alpha);
    for (int rank = 0; rank < rfv.numLocations (); rank++) {
      int idx = rfv.getIndexAtRank (rank);
      double val = rfv.value (idx);
      mass = Maths.sumLogProb (mass, val);
      idxList.add (idx);
      valList.add (val);
      if (mass > logAlpha) {
        break;
      }
    }

    int[] szs = computeSizes (ptl);
    SparseMatrixn m = new SparseMatrixn (szs, idxList.toNativeArray (), valList.toNativeArray ());

    TableFactor result = new TableFactor (computeVars (ptl));
    result.setValues (m);

    return result;
  }

  /** Returns the outcome counts of each variable of the factor, in order. */
  public static int[] computeSizes (Factor result)
  {
    int nv = result.varSet ().size();
    int[] szs = new int [nv];
    for (int i = 0; i < nv; i++) {
      Variable var = result.getVariable (i);
      szs[i] = var.getNumOutcomes ();
    }
    return szs;
  }

  /** Returns the variables of the factor as an array, in order. */
  public static Variable[] computeVars (Factor result)
  {
    int nv = result.varSet ().size();
    Variable[] vars = new Variable [nv];
    for (int i = 0; i < nv; i++) {
      Variable var = result.getVariable (i);
      vars[i] = var;
    }
    return vars;
  }

  /**
   * Given a joint distribution over two variables, returns their mutual information.
   * @param factor A joint distribution.  Must be normalized, and over exactly two variables.
   * @return The mutual information
   */
  public static double mutualInformation (Factor factor)
  {
    VarSet vs = factor.varSet ();
    if (vs.size() != 2) throw new IllegalArgumentException ("Factor must have size 2");
    Factor marg1 = factor.marginalize (vs.get (0));
    Factor marg2 = factor.marginalize (vs.get (1));

    double result = 0;
    for (Iterator it = factor.assignmentIterator (); it.hasNext(); ) {
      Assignment assn = (Assignment) it.next ();
      // I(X;Y) = sum p(x,y) [log p(x,y) - log p(x) - log p(y)]
      result += (factor.value (assn)) * (factor.logValue (assn) - marg1.logValue (assn) - marg2.logValue (assn));
    }

    return result;
  }

  /**
   * KL divergence KL(f1 || f2), assuming both factors share the same variable
   * set and location ordering.  Entries of f1 below 1e-5 are skipped (treated
   * as contributing 0).
   */
  public static double KL (AbstractTableFactor f1, AbstractTableFactor f2)
  {
    double result = 0;
    // assumes same var set
    for (int loc = 0; loc < f1.numLocations (); loc++) {
      double val1 = f1.valueAtLocation (loc);
      double val2 = f2.value (f1.indexAtLocation (loc));
      if (val1 > 1e-5) {
        result += val1 * Math.log (val1 / val2);
      }
    }
    return result;
  }

  /**
   * Returns a new Factor <tt>F = alpha * f1 + (1 - alpha) * f2</tt>.
   */
  public static Factor mix (AbstractTableFactor f1, AbstractTableFactor f2, double alpha)
  {
    return AbstractTableFactor.hackyMixture (f1, f2, alpha);
  }

  /** Euclidean (L2) distance between two factors, assuming the same var set. */
  public static double euclideanDistance (AbstractTableFactor f1, AbstractTableFactor f2)
  {
    double result = 0;
    // assumes same var set
    for (int loc = 0; loc < f1.numLocations (); loc++) {
      double val1 = f1.valueAtLocation (loc);
      double val2 = f2.value (f1.indexAtLocation (loc));
      result += (val1 - val2) * (val1 - val2);
    }
    return Math.sqrt (result);
  }

  /** L1 distance between two factors, assuming the same var set. */
  public static double l1Distance (AbstractTableFactor f1, AbstractTableFactor f2)
  {
    double result = 0;
    // assumes same var set
    for (int loc = 0; loc < f1.numLocations (); loc++) {
      double val1 = f1.valueAtLocation (loc);
      double val2 = f2.value (f1.indexAtLocation (loc));
      result += Math.abs (val1 - val2);
    }
    return result;
  }

  /**
   * Adapter that allows an Inferencer to be treated as if it were a factor.
   * @param inf An inferencer on which computeMarginals() has been called.
   * @return A factor
   */
  public static Factor asFactor (final Inferencer inf)
  {
    // Only value() and the marginalize() overloads are supported; everything
    // else throws UnsupportedOperationException.
    return new SkeletonFactor () {
      public double value (Assignment assn)
      {
        Factor factor = inf.lookupMarginal (assn.varSet ());
        return factor.value (assn);
      }

      public Factor marginalize (Variable vars[])
      {
        return inf.lookupMarginal (new HashVarSet (vars));
      }

      public Factor marginalize (Collection vars)
      {
        return inf.lookupMarginal (new HashVarSet (vars));
      }

      public Factor marginalize (Variable var)
      {
        return inf.lookupMarginal (new HashVarSet (new Variable[] { var }));
      }

      public Factor marginalizeOut (Variable var)
      {
        throw new UnsupportedOperationException ();
      }

      public Factor marginalizeOut (VarSet varset)
      {
        throw new UnsupportedOperationException ();
      }

      public VarSet varSet ()
      {
        throw new UnsupportedOperationException ();
      }
    };
  }

  /** Returns the discrete (non-continuous) variables of the factor. */
  public static Variable[] discreteVarsOf (Factor fg)
  {
    List vars = new ArrayList ();
    VarSet vs = fg.varSet ();
    for (int vi = 0; vi < vs.size (); vi++) {
      Variable var = vs.get (vi);
      if (!var.isContinuous ()) {
        vars.add (var);
      }
    }
    return (Variable[]) vars.toArray (new Variable [vars.size ()]);
  }

  /** Returns the continuous variables of the factor. */
  public static Variable[] continuousVarsOf (Factor fg)
  {
    List vars = new ArrayList ();
    VarSet vs = fg.varSet ();
    for (int vi = 0; vi < vs.size (); vi++) {
      Variable var = vs.get (vi);
      if (var.isContinuous ()) {
        vars.add (var);
      }
    }
    return (Variable[]) vars.toArray (new Variable [vars.size ()]);
  }

  /**
   * Returns the covariance E[XY] - E[X]E[Y] of a normalized two-variable
   * factor, treating each variable's outcome *index* as its numeric value.
   */
  public static double corr (Factor factor)
  {
    if (factor.varSet ().size() != 2) throw new IllegalArgumentException ("corr() only works on Factors of size 2, tried "+factor);

    Variable v0 = factor.varSet ().get (0);
    Variable v1 = factor.varSet ().get (1);

    double eXY = 0.0;
    for (AssignmentIterator it = factor.assignmentIterator (); it.hasNext();) {
      Assignment assn = (Assignment) it.next ();
      int val0 = assn.get (v0);
      int val1 = assn.get (v1);
      eXY += factor.value (assn) * val0 * val1;
    }

    double eX = mean (factor.marginalize (v0));
    double eY = mean (factor.marginalize (v1));

    return eXY - eX * eY;
  }

  // Mean of a normalized single-variable factor, using outcome indices as values.
  private static double mean (Factor factor)
  {
    if (factor.varSet ().size() != 1) throw new IllegalArgumentException ("mean() only works on Factors of size 1, tried "+factor);

    Variable v0 = factor.varSet ().get (0);
    double mean = 0.0;
    for (AssignmentIterator it = factor.assignmentIterator (); it.hasNext();) {
      Assignment assn = (Assignment) it.next ();
      int val0 = assn.get (v0);
      mean += factor.value (assn) * val0;
    }

    return mean;
  }

  /**
   * Multiplies a collection of factors into a single factor.
   * NOTE(review): `result` starts as a duplicate of the first factor and the
   * loop then multiplies in every factor *including* the first, so the first
   * factor appears to be counted twice; also `vs` is computed but never used.
   * Verify intended behavior before relying on this method.
   */
  public static Factor multiplyAll (Collection factors)
  {
    Factor first = (Factor) factors.iterator ().next ();
    if (factors.size() == 1) {
      return first.duplicate ();
    }

    /* Get all the variables */
    VarSet vs = new HashVarSet ();
    for (Iterator it = factors.iterator (); it.hasNext ();) {
      Factor phi = (Factor) it.next ();
      vs.addAll (phi.varSet ());
    }

    /* define a new potential over the neighbors of NODE */
    Factor result = first.duplicate ();
    for (Iterator it = factors.iterator (); it.hasNext ();) {
      Factor phi = (Factor) it.next ();
      result.multiplyBy (phi);
    }

    return result;
  }

  /** L-infinity distance between the log-value matrices of two table factors. */
  public static double distLinf (AbstractTableFactor f1, AbstractTableFactor f2)
  {
//    double sum1 = f1.logsum ();
//    double sum2 = f2.logsum ();
    Matrix m1 = f1.getLogValueMatrix ();
    Matrix m2 = f2.getLogValueMatrix ();
    return matrixDistLinf (m1, m2);
  }

  /** L-infinity distance between the (linear-space) value matrices of two table factors. */
  public static double distValueLinf (AbstractTableFactor f1, AbstractTableFactor f2)
  {
//    double sum1 = f1.logsum ();
//    double sum2 = f2.logsum ();
    Matrix m1 = f1.getValueMatrix ();
    Matrix m2 = f2.getValueMatrix ();
    return matrixDistLinf (m1, m2);
  }

  // Max absolute elementwise difference; +inf if the matrices differ in size.
  private static double matrixDistLinf (Matrix m1, Matrix m2)
  {
    double max = 0;
    int nl1 = m1.singleSize ();
    int nl2 = m2.singleSize ();
    if (nl1 != nl2) return Double.POSITIVE_INFINITY;
    for (int l = 0; l < nl1; l++) {
      double val1 = m1.singleValue (l);
      double val2 = m2.singleValue (l);
      double diff = (val1 > val2) ? val1 - val2 : val2 - val1;
      max = (diff > max) ? diff : max;
    }
    return max;
  }

  /** Implements the error range measure from Ihler et al. */
  public static double logErrorRange (AbstractTableFactor f1, AbstractTableFactor f2)
  {
    double error_min = Double.MAX_VALUE;
    double error_max = 0;
    Matrix m1 = f1.getLogValueMatrix ();
    Matrix m2 = f2.getLogValueMatrix ();
    int nl1 = m1.singleSize ();
    int nl2 = m2.singleSize ();
    if (nl1 != nl2) return Double.POSITIVE_INFINITY;
    for (int l = 0; l < nl1; l++) {
      double val1 = m1.singleValue (l);
      double val2 = m2.singleValue (l);
      double diff = (val1 > val2) ? val1 - val2 : val2 - val1;
      error_max = (diff > error_max) ? diff : error_max;
      error_min = (diff < error_min) ? diff : error_min;
    }
    return error_max - error_min;
  }

}
12,480
28.930456
119
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/Tree.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;

import cc.mallet.types.Alphabet;
import gnu.trove.TObjectIntHashMap;

/**
 * Class for arbitrary trees, based on implementation in OpenJGraph.
 * The OpenJGraph tree implementation is a bit minimal wrt
 *  convenience functions, so we add a few here.
 *
 * Vertices are arbitrary Objects; each vertex is mapped to an int index,
 * and parent/child relations are stored in parallel ArrayLists keyed by
 * that index.
 *
 * NOTE(review): several methods compare vertex2int.get(v) against -1 to
 * detect missing vertices.  gnu.trove's TObjectIntHashMap returns its
 * no-entry value (0 by default in some versions) for missing keys —
 * verify that the trove version in use returns -1, otherwise
 * maybeAddVertex/containsObject misbehave for unknown vertices.
 *
 * Created: Wed Oct  1 14:51:47 2003
 *
 * @author <a href="mailto:[email protected]">Charles Sutton</a>
 * @version $Id: Tree.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public class Tree {

  private TObjectIntHashMap vertex2int = new TObjectIntHashMap ();
  private ArrayList int2vertex = new ArrayList (); // index -> vertex
  private ArrayList parents = new ArrayList ();    // index -> parent vertex (null for root)
  private ArrayList children = new ArrayList ();   // index -> ArrayList of child vertices

  private Object root = null;

  public Tree() {} // Tree constructor

  // Efficient indexing of parents, children

  /** Builds a tree with the given root whose children are the roots of the given subtrees. */
  public static Tree makeFromSubtree (Object parent, List subtrees)
  {
    Tree tree = new Tree();
    tree.add (parent);
    for (Iterator it = subtrees.iterator (); it.hasNext ();) {
      Tree subtree = (Tree) it.next ();
      tree.addSubtree (parent, subtree, subtree.getRoot ());
    }
    return tree;
  }

  // Recursively copies child (and its descendants in subtree) under parent.
  private void addSubtree (Object parent, Tree subtree, Object child)
  {
    addNode (parent, child);
    for (Iterator it = subtree.getChildren (child).iterator (); it.hasNext ();) {
      Object gchild = it.next ();
      addSubtree (child, subtree, gchild);
    }
  }

  protected int lookupIndex (Object v)
  {
    return vertex2int.get (v);
  }

  protected Object lookupVertex (int idx)
  {
    return int2vertex.get (idx);
  }

  // Returns the index of v, registering it (with empty child list, null parent)
  // if not yet present.
  int maybeAddVertex (Object v)
  {
    int foo = vertex2int.get (v);
    if (foo == -1) {
      foo = int2vertex.size ();
      int2vertex.add (v);
      vertex2int.put (v, foo);
      parents.add (null);
      children.add (new ArrayList ());
    }
    return foo;
  }

  /** Sets the root of an empty tree.  Throws if a root already exists. */
  public void add (Object rt)
  {
    if (root == null) {
      maybeAddVertex (rt);
      root = rt;
    } else {
      throw new UnsupportedOperationException ("This tree already has a root.");
    }
  }

  /**
   * Adds child under parent.  If the tree is empty, parent becomes the root.
   * NOTE(review): the exception for an unknown parent reuses the
   * "already has a root" message, which is misleading for that case.
   * @throws UnsupportedOperationException if parent is unknown, or if child
   *   already has a different parent.
   */
  public void addNode (Object parent, Object child)
  {
    int id1;
    if (root == null) {
      root = parent;
      id1 = maybeAddVertex (parent);
    } else if ((id1 = lookupIndex (parent)) == -1)
      throw new UnsupportedOperationException ("This tree already has a root.");

    int id2 = maybeAddVertex (child);

    Object oldParent = parents.get (id2);
    if ((oldParent != null) && (oldParent != parent))
      throw new UnsupportedOperationException ("Trying to change parent of Object "+child+" from "
                                               +oldParent+" to "+parent);

    parents.set (id2, parent);
    ArrayList childList = (ArrayList) children.get (id1);
    childList.add (child);
  }

  /** Returns the parent of child, or null if child is the root or unknown. */
  public Object getParent (Object child)
  {
    int pidx = vertex2int.get (child);
    if (pidx < 0) {
      return null;
    } else {
      return parents.get (pidx);
    }
  }

  /** Returns an unmodifiable view of parent's children. */
  public List getChildren (Object parent)
  {
    int id = vertex2int.get (parent);
    return Collections.unmodifiableList ((List) children.get (id));
  }

  // Convenience functions

  public boolean isRoot (Object var)
  {
    int idx = lookupIndex (var);
    return (parents.get(idx) == null);
  }

  public boolean containsObject (Object v)
  {
    return (vertex2int.get (v) >= 0);
  }

  public boolean isLeaf (Object v)
  {
    int idx = lookupIndex (v);
    return ((List)children.get(idx)).size() == 0;
  }

  public Iterator getVerticesIterator ()
  {
    return int2vertex.iterator();
  }

  public Object getRoot ()
  {
    return root;
  }

  /** Renders the tree as an indented outline, one vertex per line. */
  public String dumpToString ()
  {
    StringBuffer buf = new StringBuffer ();
    dumpRec (root, 0, buf);
    return buf.toString ();
  }

  // Depth-first pretty-printer; indent is three dashes per level.
  private void dumpRec (Object node, int lvl, StringBuffer buf)
  {
    for (int i = 0; i < 3 * lvl; i++) {
      buf.append ("-");
    }
    buf.append (" ").append (node).append ("\n");
    for (Iterator it = getChildren (node).iterator (); it.hasNext();) {
      Object child = it.next ();
      dumpRec (child, lvl+1, buf);
    }
  }

} // Tree
4,655
24.032258
81
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/DirectedModel.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.types; // Generated package name import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.util.Iterator; import java.util.Map; import org._3pq.jgrapht.DirectedGraph; import org._3pq.jgrapht.graph.DefaultDirectedGraph; import org._3pq.jgrapht.alg.ConnectivityInspector; import gnu.trove.THashMap; /** * Class for directed graphical models. This is just a * souped-up Graph. * * Created: Mon Sep 15 14:50:19 2003 * * @author <a href="mailto:[email protected]">Charles Sutton</a> * @version $Id: DirectedModel.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class DirectedModel extends FactorGraph { private Map allCpts = new THashMap (); // Graph object used to prevent directed cycles private DirectedGraph graph = new DefaultDirectedGraph (); public DirectedModel () { } public DirectedModel (Variable[] vars) { super (vars); } public DirectedModel (int capacity) { super (capacity); } protected void beforeFactorAdd (Factor factor) { super.beforeFactorAdd (factor); if (!(factor instanceof CPT)) { throw new IllegalArgumentException ("Factors of a directed model must be an instance of CPT, was "+factor); } CPT cpt = (CPT) factor; Variable child = cpt.getChild (); VarSet parents = cpt.getParents (); if (graph.containsVertex (child)) { checkForNoCycle (parents, child, cpt); } } private void checkForNoCycle (VarSet parents, Variable child, CPT cpt) { ConnectivityInspector inspector = new ConnectivityInspector (graph); for (Iterator it = parents.iterator (); it.hasNext ();) { Variable rent = (Variable) 
it.next (); if (inspector.pathExists (child, rent)) { throw new IllegalArgumentException ("Error adding CPT: Would create directed cycle"+ "From: "+rent+" To:"+child+"\nCPT: "+cpt); } } } protected void afterFactorAdd (Factor factor) { super.afterFactorAdd (factor); CPT cpt = (CPT) factor; Variable child = cpt.getChild (); VarSet parents = cpt.getParents (); allCpts.put (child, cpt); graph.addVertex (child); graph.addAllVertices (parents); for (Iterator it = parents.iterator (); it.hasNext ();) { Variable rent = (Variable) it.next (); graph.addEdge (rent, child); } } /** * Returns the conditional distribution <tt>P ( node | Parents (node) )</tt> */ public CPT getCptofVar (Variable node) { return (CPT) allCpts.get (node); } // Serialization garbage private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject (ObjectOutputStream out) throws IOException { out.defaultWriteObject (); out.writeInt (CURRENT_SERIAL_VERSION); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject (); int version = in.readInt (); } }// DirectedModel
3,466
27.418033
113
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/AbstractTableFactor.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.types; import gnu.trove.TIntObjectHashMap; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.util.*; import cc.mallet.grmm.util.GeneralUtils; import cc.mallet.types.*; import cc.mallet.util.Maths; import cc.mallet.util.Randoms; /** * Class for a multivariate multinomial distribution. * <p/> * Created: Mon Sep 15 17:19:24 2003 * * @author <a href="mailto:[email protected]">Charles Sutton</a> * @version $Id: AbstractTableFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public abstract class AbstractTableFactor implements DiscreteFactor { /** * Maps all of the Variable objects of this distribution * to an integer that says which dimension in the probs * matrix correspands to that var. */ private Universe universe = Universe.DEFAULT; private VarSet vars; /** * Number of variables in this potential. 
*/ private int numVars; protected Matrix probs; protected AbstractTableFactor (BidirectionalIntObjectMap varMap) { initVars (varMap); setAsIdentity (); } private void initVars (BidirectionalIntObjectMap allVars) { initVars (Arrays.asList (allVars.toArray ())); } private void initVars (Variable allVars[]) { int sizes[] = new int[allVars.length]; vars = new HashVarSet (Arrays.asList (allVars)); // vars = new (universe, Arrays.asList (allVars)); // Arrays.sort (allVars); for (int i = 0; i < allVars.length; i++) { Variable var = vars.get (i); if (var.isContinuous ()) { throw new IllegalArgumentException ("Attempt to create table over continous variable "+allVars[i]); } sizes[i] = var.getNumOutcomes (); } probs = new Matrixn (sizes); if (probs.numLocations () == 0) { System.err.println ("Warning: empty potential created"); } numVars = allVars.length; } private void initVars (Collection allVars) { initVars ((Variable[]) allVars.toArray (new Variable[allVars.size ()])); } private void setProbs (double[] probArray) { if (probArray.length != probs.numLocations ()) { /* This shouldn't be a runtime exception. So sue me. */ throw new RuntimeException ("Attempt to initialize potential with bad number of prababilities.\n" + "Needed " + probs.numLocations () + " got " + probArray.length); } for (int i = 0; i < probArray.length; i++) { probs.setValueAtLocation (i, probArray[i]); } } /** * Creates an identity potential over the given variable. */ public AbstractTableFactor (Variable var) { initVars (new Variable[]{var}); setAsIdentity (); } public AbstractTableFactor (Variable var, double[] values) { initVars (new Variable[]{var}); setProbs (values); } /** * Creates an identity potential over NO variables. */ public AbstractTableFactor () { initVars (new Variable[]{}); setAsIdentity (); } /** * Creates an identity potential with the given variables. 
*/ public AbstractTableFactor (Variable allVars []) { initVars (allVars); setAsIdentity (); } /** * Creates an identity potential with the given variables. * * @param allVars A collection containing the Variables * of this distribution. */ public AbstractTableFactor (Collection allVars) { initVars (allVars); setAsIdentity (); } /** * Creates a potential with the given variables and * the given probabilities. * * @param allVars Variables of the potential * @param probs All phi values of the potential, in row-major order. */ public AbstractTableFactor (Variable[] allVars, double[] probs) { initVars (allVars); setProbs (probs); } /** * Creates a potential with the given variables and * the given probabilities. * * @param allVars Variables of the potential * @param probs All phi values of the potential, in row-major order. */ private AbstractTableFactor (BidirectionalIntObjectMap allVars, double[] probs) { initVars (allVars); setProbs (probs); } /** * Creates a potential with the given variables and * the given probabilities. * * @param allVars Variables of the potential * @param probs All phi values of the potential, in row-major order. */ public AbstractTableFactor (VarSet allVars, double[] probs) { initVars (allVars.toVariableArray ()); setProbs (probs); } /** * Creates a potential with the given variables and * the given probabilities. * * @param allVars Variables of the potential * @param probsIn All the phi values of the potential. */ public AbstractTableFactor (Variable[] allVars, Matrix probsIn) { initVars (allVars); probs = (Matrix) probsIn.cloneMatrix (); } /** * Creates a potential with the given variables and * the given probabilities. * * @param allVars Variables of the potential * @param probsIn All the phi values of the potential. */ private AbstractTableFactor (BidirectionalIntObjectMap allVars, Matrix probsIn) { initVars (allVars); probs = (Matrix) probsIn.cloneMatrix (); } /** * Copy constructor. 
*/ public AbstractTableFactor (AbstractTableFactor in) { //xxx Could be dangerous! But these should never be modified vars = in.vars; numVars = in.numVars; if (in.projectionCache == null) in.initializeProjectionCache (); projectionCache = in.projectionCache; } /** * Creates a potential with the given variables and * the given probabilities. * * @param allVars Variables of the potential * @param probsIn All the phi values of the potential. */ public AbstractTableFactor (VarSet allVars, Matrix probsIn) { initVars (allVars.toVariableArray ()); probs = (Matrix) probsIn.cloneMatrix (); } /** * Creates a potential with the same variables as another, but different probabilites. * @param ptl * @param probs */ public AbstractTableFactor (AbstractTableFactor ptl, double[] probs) { this (ptl.vars, probs); } /************************************************************************** * STATIC FACTORY METHODS **************************************************************************/ public static Factor makeIdentityFactor (AbstractTableFactor copy) { return new TableFactor (copy.vars); } void setAll (double val) { for (int i = 0; i < probs.numLocations (); i++) { probs.setSingleValue (i, val); } } /////////////////////////////////////////////////////////////////////////// // ABSTRACT METHODS /////////////////////////////////////////////////////////////////////////// /** * Forces this potential to be the identity (all 1s). */ abstract void setAsIdentity (); public abstract Factor duplicate (); public abstract Factor normalize (); public abstract double sum (); protected abstract AbstractTableFactor createBlankSubset (Variable[] vars); private AbstractTableFactor createBlankSubset (Collection vars) { return createBlankSubset ((Variable[]) vars.toArray (new Variable [vars.size ()])); } protected int getNumVars () { return numVars; } /////////////////////////////////////////////////////////////////////////// // This method is inherently dangerous b/c variable ordering issues. 
  // Consider using setPhi(Assignment,double) instead.
  /**
   * Resets the entire value matrix.  Dangerous: the caller must supply values
   *  in this factor's internal (row-major) variable order.
   */
  public void setValues (Matrix probs)
  {
    if (this.probs.singleSize () != probs.singleSize ())
      throw new UnsupportedOperationException ("Trying to reset prob matrix with wrong number of probabilities. Previous num probs: "+ this.probs.singleSize ()+" New num probs: "+probs.singleSize ());
    if (this.probs.getNumDimensions () != probs.getNumDimensions ())
      throw new UnsupportedOperationException ("Trying to reset prob matrix with wrong number of dimensions.");
    this.probs = probs;
  }

  /**
   * Returns true iff this potential is over the given variable
   */
  public boolean containsVar (Variable var)
  {
    return vars.contains (var);
  }

  /**
   * Returns set of variables in this potential.
   */
  public VarSet varSet ()
  {
    return new UnmodifiableVarSet (vars);
  }

  /** Iterates over assignments; uses a sparse iterator when the backing matrix is sparse. */
  public AssignmentIterator assignmentIterator ()
  {
    if (probs instanceof SparseMatrixn) {
      int[] idxs = ((SparseMatrixn) probs).getIndices ();
      if (idxs != null) {
        return new SparseAssignmentIterator (vars, idxs);
      }
    }
    return new DenseAssignmentIterator (vars);
  }

  /** Sets the raw (representation-space) value at the given assignment. */
  public void setRawValue (Assignment assn, double value)
  {
    int[] indices = new int[numVars];
    for (int i = 0; i < numVars; i++) {
      Variable var = getVariable (i);
      indices[i] = assn.get (var);
    }
    probs.setValue (indices, value);
  }

  public void setRawValue (AssignmentIterator it, double value)
  {
    probs.setSingleValue (it.indexOfCurrentAssn (), value);
  }

  protected void setRawValue (int loc, double value)
  {
    probs.setSingleValue (loc, value);
  }

  public abstract double value (Assignment assn);

  // Special function to do normalization in log space
  // Computes sum if this potential is in log space
  public double logsum ()
  {
    return Math.log (probs.oneNorm ());
  }

  /** Returns the entropy, treating the values as probabilities; p == 0 terms are skipped. */
  public double entropy ()
  {
    double h = 0;
    double p;
    for (AssignmentIterator it = assignmentIterator (); it.hasNext ();) {
      p = logValue (it);
      if (!Double.isInfinite (p))
        h -= p * Math.exp (p);
      it.advance ();
    }
    return h;
  }

  // PROJECTION OF INDICES
  // Maps potentials --> int[]

  /* Be careful about this thing, however.  It gets shallow copied whenever
   *  a potential is duplicated, so if a potential were modified (e.g.,
   *  by expandToContain) while this was being shared, things could
   *  get ugly.  I think everything is all right at the moment, but keep
   *  it in mind if inexplicable bugs show up in the future. -cas */
  transient private TIntObjectHashMap projectionCache; // lazily constructed

  private void initializeProjectionCache ()
  {
    projectionCache = universe.lookupProjectionCache (varSet ());
  }

  /* Returns a hash value for subsets of this potential's variable set.
   *  Note that the hash value depends only on the set's membership
   *  (not its order), so that this hashing scheme would be unsafe
   *  for the projection cache unless potential variables were always
   *  in a canonical order, which they are. */
  private int computeSubsetHashValue (DiscreteFactor subset)
  {
    // If potentials have more than 32 variables, we need to use an
    //  expandable bitset, but then again, you probably wouldn't have
    //  enough memory to represent the potential anyway
    assert getNumVars () <= 32;
    int result = 0;
    double numVars = subset.varSet ().size ();
    int lrgi = 0;
    // relies on variables being sorted
    for (int smi = 0; smi < numVars; smi++) {
      Object var = subset.getVariable (smi);
      // this loop breaks if subset is not in fact a subset, but that is an error anyway
      while (var != this.getVariable (lrgi)) {
        lrgi++;
      }
      result |= (1 << lrgi);
    }
    return result;
  }

  /* For below, I tried special casing this as:
       if (smallPotential.numVars == 1) {
         int projection[] = new int[probs.singleSize ()];
         int largeDims[] = new int[numVars];
         Variable smallVar = (Variable) smallPotential.varMap.lookupObject (0);
         int largeDim = this.varMap.lookupIndex (smallVar, false);
         assert largeDim != -1 : smallVar;
         for (int largeIdx = 0; largeIdx < probs.singleSize (); largeIdx++) {
           probs.singleToIndices (largeIdx, largeDims);
           projection[largeIdx] = largeDims[largeDim];
         }
         return projection;
       }
     but this didn't seem to make a huge performance gain. */
  // Maps each location of this (larger) factor to the single index of the
  //  consistent entry of smallPotential.
  private int[] computeLargeIdxToSmall (DiscreteFactor smallPotential)
//  private int largeIdxToSmall (int largeIdx, MultinomialPotential smallPotential)
  {
    int projection[] = new int[probs.numLocations ()];
    int largeDims[] = new int[numVars];
    int smallNumVars = smallPotential.varSet().size();
    int smallDims[] = new int[smallNumVars];
    for (int largeLoc = 0; largeLoc < probs.numLocations (); largeLoc++) {
      int largeIdx = probs.indexAtLocation (largeLoc);
      probs.singleToIndices (largeIdx, largeDims);
      // relies on variables being sorted
      int largeDim = 0;
      for (int smallDim = 0; smallDim < smallNumVars; smallDim++) {
        Variable smallVar = smallPotential.getVariable (smallDim);
        while (smallVar != this.getVariable (largeDim)) {
          largeDim++;
        }
        smallDims[smallDim] = largeDims[largeDim];
      }
      projection[largeLoc] = smallPotential.singleIndex (smallDims);
    }
    return projection;
  }

  int[] largeIdxToSmall (DiscreteFactor smallPotential)
//  private int cachedlargeIdxToSmall (int largeIdx, MultinomialPotential smallPotential)
  {
    if (projectionCache == null) initializeProjectionCache ();
    // Special case where smallPtl has only one variable.  Here
    //  since ordering is not a problem, we can use a set-based
    //  hash key.
    return cachedLargeIdxToSmall (smallPotential);
//    if (smallPotential.varSet ().size () == 1) {
//      return cachedLargeIdxToSmall (smallPotential);
//    } else {
//      return computeLargeIdxToSmall (smallPotential);
//    }
  }

  // Cached version of computeLargeIdxToSmall for ptls with a single variable.
  // This code is designed to work if smallPotential has multiple variables,
  //  but it breaks if it's called with two potentials with the same
  //  variables in different orders.
  // TODO: Make work for multiple variables (canonical ordering?)
private int[] cachedLargeIdxToSmall (DiscreteFactor smallPotential) { int hashval = computeSubsetHashValue (smallPotential); Object ints = projectionCache.get (hashval); if (ints != null) { return (int[]) ints; } else { int[] projection = computeLargeIdxToSmall (smallPotential); projectionCache.put (hashval, projection); return projection; } } /** * Returns the marginal of this distribution over the given variables. */ public Factor marginalize (Variable vars[]) { assert varSet ().containsAll (Arrays.asList (vars)); // Perhaps throw exception instead return marginalizeInternal (createBlankSubset (vars)); } public Factor marginalize (Collection vars) { assert varSet ().containsAll (vars); // Perhaps throw exception instead return marginalizeInternal (createBlankSubset (vars)); } public Factor marginalize (Variable var) { assert varSet ().contains (var); // Perhaps throw exception instead return marginalizeInternal (createBlankSubset (new Variable[]{var})); } public Factor marginalizeOut (Variable var) { Set newVars = new HashVarSet (vars); newVars.remove (var); return marginalizeInternal (createBlankSubset (newVars)); } public Factor marginalizeOut (VarSet badVars) { Set newVars = new HashVarSet (vars); newVars.remove (badVars); return marginalizeInternal (createBlankSubset (newVars)); } protected abstract Factor marginalizeInternal (AbstractTableFactor result); public Factor extractMax (Variable var) { return extractMaxInternal (createBlankSubset (new Variable[] { var })); } public Factor extractMax (Variable[] vars) { return extractMaxInternal (createBlankSubset (vars)); } public Factor extractMax (Collection vars) { return extractMaxInternal (createBlankSubset (vars)); } private Factor extractMaxInternal (AbstractTableFactor result) { result.setAll (Double.NEGATIVE_INFINITY); int[] projection = largeIdxToSmall (result); /* Add each element of the single array of the large potential to the correct element in the small potential. 
*/ for (int largeLoc = 0; largeLoc < probs.numLocations (); largeLoc++) { /* Convert a single-index from this distribution to one for the smaller distribution */ int smallIdx = projection[largeLoc]; /* Whew! Now, add it in. */ double largeValue = this.probs.valueAtLocation (largeLoc); double smallValue = result.probs.singleValue (smallIdx); if (largeValue > smallValue) { result.probs.setValueAtLocation (smallIdx, largeValue); } } return result; } private void expandToContain (DiscreteFactor pot) { // if so, expand this potential. this is not pretty if (needsToExpand (varSet (), pot.varSet ())) { VarSet newVarSet = new HashVarSet (varSet ()); newVarSet.addAll (pot.varSet ()); AbstractTableFactor newPtl = createBlankSubset (newVarSet); newPtl.multiplyByInternal (this); vars = newPtl.vars; probs = newPtl.probs; numVars = newPtl.numVars; initializeProjectionCache (); } } private boolean needsToExpand (VarSet mine, VarSet his) { int size_h = his.size (); int vi_m = 0; int vi_h = 0; Variable var_h, var_m; while ((vi_m < numVars) && (vi_h < size_h)) { var_m = mine.get (vi_m); var_h = his.get (vi_h); vi_m++; if (var_m == var_h) { vi_h++; } } return vi_h < size_h; } /** * Does the conceptual equivalent of this *= pot. * Assumes that pot's variables are a subset of * this potential's. */ public void multiplyBy (Factor pot) { if (pot instanceof DiscreteFactor) { DiscreteFactor factor = (DiscreteFactor) pot; expandToContain (factor); factor = ensureOperandCompatible (factor); multiplyByInternal (factor); } else if (pot instanceof ConstantFactor) { timesEquals (pot.value (new Assignment ())); } else { AbstractTableFactor tbl; try { tbl = pot.asTable (); } catch (UnsupportedOperationException e) { throw new UnsupportedOperationException ("Don't know how to multiply "+this+" by "+pot); } multiplyBy (tbl); } } /** * Ensures that <tt>this.inLogSpace == ptl.inLogSpace</tt>. If this is * not the case, return a copy of ptl logified or delogified as appropriate. 
* * @param ptl * @return A potential equivalent to ptl, possibly logified or delogified. * ptl itself could be returned. */ protected DiscreteFactor ensureOperandCompatible (DiscreteFactor ptl) { return ptl; }; // Does destructive multiplication on this, assuming this has all // the variables in pot. protected abstract void multiplyByInternal (DiscreteFactor ptl); protected abstract void plusEqualsInternal (DiscreteFactor ptl); /** * Returns the elementwise product of this potential and * another one. */ public Factor multiply (Factor dist) { Factor result = duplicate (); result.multiplyBy (dist); return result; } /** * Does the conceptual equivalent of this /= pot. * Assumes that pot's variables are a subset of * this potential's. */ public void divideBy (Factor pot) { if (pot instanceof DiscreteFactor) { DiscreteFactor pot1 = (DiscreteFactor) pot; // cheating expandToContain (pot1); pot1 = ensureOperandCompatible (pot1); divideByInternal (pot1); } else if (pot instanceof ConstantFactor) { timesEquals (1.0 / pot.value (new Assignment ())); } else { AbstractTableFactor tbl; try { tbl = pot.asTable (); } catch (UnsupportedOperationException e) { throw new UnsupportedOperationException ("Don't know how to multiply "+this+" by "+pot); } multiplyBy (tbl); } } // Does destructive divison on this, assuming this has all // the variables in pot. 
  protected abstract void divideByInternal (DiscreteFactor ptl);

  // xxx Should return an assignment
  /** Returns the single-value index of the entry with the largest value. */
  public int argmax ()
  {
    int bestIdx = 0;
    double bestVal = probs.singleValue (0);

    for (int idx = 1; idx < probs.numLocations (); idx++) {
      double val = probs.singleValue (idx);
      if (val > bestVal) {
        bestVal = val;
        bestIdx = idx;
      }
    }

    return bestIdx;
  }

  // tolerance when comparing the running cumulative sum to the sampled point
  private static final double EPS = 1e-5;

  /** Samples an assignment, treating the values as unnormalized probabilities. */
  public Assignment sample (Randoms r)
  {
    int loc = sampleLocation (r);
    return location2assignment (loc);
  }

  private Assignment location2assignment (int loc)
  {
    return new DenseAssignmentIterator (vars, loc).assignment ();
  }

  /** Samples a location index proportionally to its value. */
  public int sampleLocation (Randoms r)
  {
    double sum = sum();
    double sampled = r.nextUniform () * sum;

    double cum = 0;
    for (int idx = 0; idx < probs.numLocations (); idx++) {
      // NOTE(review): idx here is a location, but value() takes a single
      //  index; the two coincide for dense matrices — verify for sparse ones.
      double val = value (idx);

      cum += val;
      if (sampled <= cum + EPS) {
        return idx;
      }
    }

    throw new RuntimeException ("Internal errors: Couldn't sample from potential "+this+"\n"+dumpToString ()+"\n Using value "+sampled);
  }

  public boolean almostEquals (Factor p)
  {
    return almostEquals (p, Maths.EPSILON);
  }

  /** Returns true iff p ranges over exactly the same variables and all values agree within epsilon. */
  public boolean almostEquals (Factor p, double epsilon)
  {
    if (!(p instanceof AbstractTableFactor)) {
      return false;
    }

    DiscreteFactor p2 = (DiscreteFactor) p;
    if (!varSet ().containsAll (p2.varSet ())) {
      return false;
    }
    if (!p2.varSet ().containsAll (varSet ())) {
      return false;
    }

    /* TODO: fold into probs.almostEqauals() if variable ordering
     * issues ever resolved.  Also, consider using this in all
     * those hasConverged() functions.
     */
    int[] projection = largeIdxToSmall (p2);
    for (int loc1 = 0; loc1 < probs.numLocations (); loc1++) {
      int idx2 = projection[loc1];
      double v1 = valueAtLocation (loc1);
      double v2 = p2.value (idx2);
      if (Math.abs (v1 - v2) > epsilon) {
        return false;
      }
    }
    return true;
  }

  public Object clone () { return duplicate (); }

  public String toString ()
  {
    StringBuffer s = new StringBuffer (1024);
    s.append ("[");
    s.append (GeneralUtils.classShortName(this));
    s.append (" : ");
    s.append (varSet ());
    s.append ("]");
    return s.toString ();
  }

  /** Returns a human-readable dump of every entry plus the total sum. */
  public String dumpToString ()
  {
    StringBuffer s = new StringBuffer (1024);
    s.append (this.toString ());
    s.append ("\n");
    int indices[] = new int[numVars];
    for (int loc = 0; loc < probs.numLocations (); loc++) {
      int idx = probs.indexAtLocation (loc);
      probs.singleToIndices (idx, indices);
      for (int j = 0; j < numVars; j++) {
        s.append (indices[j]);
        s.append (" ");
      }
      double val = probs.singleValue (idx);
      s.append (val);
      s.append ("\n");
    }
    s.append (" Sum = ").append (sum ()).append ("\n");
    return s.toString ();
  }

  public boolean isNaN ()
  {
    return probs.isNaN ();
  }

  public void printValues ()
  {
    System.out.print ("[");
    for (int i = 0; i < probs.numLocations (); i++) {
      System.out.print (probs.valueAtLocation (i));
      System.out.print (", ");
    }
    System.out.print ("]");
  }

  public void printSizes ()
  {
    int[] sizes = new int[numVars];
    probs.getDimensions (sizes);
    System.out.print ("[");
    for (int i = 0; i < numVars; i++) {
      System.out.print (sizes[i] + ", ");
    }
    System.out.print ("]");
  }

  /** Returns the variable of this factor with the given label, or null if none. */
  public Variable findVariable (String name)
  {
    for (int i = 0; i < getNumVars (); i++) {
      Variable var = getVariable (i);
      if (var.getLabel().equals (name)) return var;
    }
    return null;
  }

  public int numLocations ()
  {
    return probs.numLocations ();
  }

  public int indexAtLocation (int loc)
  {
    return probs.indexAtLocation (loc);
  }

  public Variable getVariable (int i)
  {
    return vars.get (i);
  }

  // Serialization
  private static final long serialVersionUID = 1;

  // If seralization-incompatible changes are made
  //  to these classes,
  // then smarts can be added to these methods for backward compatibility.

  private void writeObject (ObjectOutputStream out) throws IOException
  {
    out.defaultWriteObject ();
  }

  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
  {
    in.defaultReadObject ();
    // rerun initializers of transient fields
    projectionCache = new TIntObjectHashMap ();
  }

  public void divideBy (double v)
  {
    probs.divideEquals (v);
  }

  /** Use of this method is discouraged. */
  public abstract void setLogValue (Assignment assn, double logValue);

  /** Use of this method is discouraged. */
  public abstract void setLogValue (AssignmentIterator assnIt, double logValue);

  /** Use of this method is discouraged. */
  public abstract void setValue (AssignmentIterator assnIt, double logValue);

  // Returns the pointwise weighted average weight*ptl1 + (1-weight)*ptl2.
  //  Requires both factors to have the same variables in the same order.
  static Factor hackyMixture (AbstractTableFactor ptl1, AbstractTableFactor ptl2, double weight)
  {
    // check that alphabets match
    if (ptl1.getNumVars() != ptl2.getNumVars()) {
      throw new IllegalArgumentException ();
    }
    for (int i = 0; i < ptl2.getNumVars(); i++) {
      if (ptl1.getVariable (i) != ptl2.getVariable (i)) {
        throw new IllegalArgumentException ();
      }
    }
    if (ptl1.ensureOperandCompatible (ptl2) != ptl2) throw new IllegalArgumentException ();

    AbstractTableFactor result = new TableFactor (ptl1.vars);
    for (int loc1 = 0; loc1 < ptl1.numLocations (); loc1++) {
      double val1 = ptl1.valueAtLocation (loc1);
      int idx = ptl1.indexAtLocation (loc1);
      double val2 = ptl2.value (idx);
      // NOTE(review): setRawValue takes a location, while idx is a single
      //  index; these coincide only for dense matrices — verify.
      result.setRawValue (idx, weight * val1 + (1 - weight) * val2);
    }

    /*
    TIntHashSet indices = new TIntHashSet ();
    for (int loc = 0; loc < ptl1.probs.numLocations (); loc++) {
      indices.add (ptl1.probs.indexAtLocation (loc));
    }
    for (int loc = 0; loc < ptl2.probs.numLocations (); loc++) {
      indices.add (ptl2.probs.indexAtLocation (loc));
    }

    int[] idxs = indices.toArray ();
    Arrays.sort (idxs);

    double[] vals = new double[idxs.length];
    if (ptl1 instanceof LogTableFactor) { // hack
      for (int i = 0; i < idxs.length; i++) {
        vals[i] = weight * Math.exp (ptl1.probs.singleValue (idxs[i])) + (1 - weight) * Math.exp (ptl2.probs.singleValue (idxs[i]));
        vals[i] = Math.log (vals[i]);
      }
    } else {
      for (int i = 0; i < idxs.length; i++) {
        vals[i] = weight * ptl1.probs.singleValue (idxs[i]) + (1 - weight) * ptl2.probs.singleValue (idxs[i]);
      }
    }

    int[] szs = new int [ptl1.probs.getNumDimensions ()];
    ptl1.probs.getDimensions (szs);
    SparseMatrixn m = new SparseMatrixn (szs, idxs, vals);

    AbstractTableFactor result = ptl1.createBlankSubset (ptl1.varMap);
    result.setValues (m);
    */

    if (!ptl1.isNaN () && !ptl2.isNaN () && result.isNaN ()) {
      System.err.println ("Oops! NaN in averaging.\n P1"+ptl1.isNaN ()+"\n P2:"+ptl2.isNaN ()+"\n Result:"+result.isNaN ());
    }

    return result;
  }

  protected abstract double rawValue (int singleIdx);

  public double[] toValueArray ()
  {
    Matrix matrix = getValueMatrix ();
    double[] arr = new double [matrix.numLocations ()];
    for (int i = 0; i < arr.length; i++) {
      arr[i] = matrix.valueAtLocation (i);
    }
    return arr;
  }

  public int singleIndex (int[] smallDims)
  {
    return probs.singleIndex (smallDims);
  }

  public abstract Matrix getValueMatrix ();

  public abstract Matrix getLogValueMatrix ();

  public abstract void setLogValues (double[] vals);

  public abstract void setValues (double[] vals);

  public double[] toLogValueArray ()
  {
    Matrix matrix = getLogValueMatrix ();
    if (matrix instanceof Matrixn)
      return ((Matrixn)matrix).toArray ();
    else if (matrix instanceof SparseMatrixn)
      return ((SparseMatrixn)matrix).toArray ();
    else throw new RuntimeException ();
  }

  public double[] getValues ()
  {
    return ((Matrixn)getValueMatrix ()).toArray ();
  }

  /** Adds a constant to all values in the table. This is most useful to add a small constant to avoid zeros.
   */
  public void plusEquals (double v)
  {
    for (int loc = 0; loc < numLocations (); loc++) {
      plusEqualsAtLocation (loc, v);
    }
  }

  /** Adds another factor into this one elementwise (expanding this factor if needed). */
  public void plusEquals (Factor f)
  {
    if (f instanceof DiscreteFactor) {
      DiscreteFactor factor = (DiscreteFactor) f;
      expandToContain (factor);
      factor = ensureOperandCompatible (factor);
      plusEqualsInternal (factor);
    } else if (f instanceof ConstantFactor) {
      plusEquals (f.value (new Assignment ()));
    } else {
      AbstractTableFactor tbl;
      try {
        tbl = f.asTable ();
      } catch (UnsupportedOperationException e) {
        throw new UnsupportedOperationException ("Don't know how to add "+this+" by "+f);
      }
      plusEquals (tbl);
    }
  }

  /** Multiplies a constant by all values in the table. */
  public abstract void timesEquals (double v);

  protected abstract void plusEqualsAtLocation (int loc, double v);

  /**
   * Multiplies this factor by the constant 1/max().  This ensures that the maximum
   *  value of this factor is 1.0
   */
  public abstract AbstractTableFactor recenter ();

  public AbstractTableFactor asTable ()
  {
    return this;
  }

  /**
   * Creates a new potential that is equal to this one, restricted to a given assignment.
   * @param assn Variables to hold as fixed
   * @return A new factor over VARS(factor)\VARS(assn)
   */
  public Factor slice (Assignment assn)
  {
    Set intersection = varSet().intersection (assn.varSet ());
    if (intersection.isEmpty ()) {
      // no variables in common: nothing to restrict
      return this;
    } else {
      HashVarSet clique = new HashVarSet (varSet ());
      clique.removeAll (Arrays.asList (assn.getVars ()));
      return this.sliceInternal (clique.toVariableArray (), assn);
    }
  }

  // Dispatches to a specialized implementation by the number of remaining variables.
  private Factor sliceInternal (Variable[] vars, Assignment observed)
  {
    // Special case for speed
    if (vars.length == 1) {
      return slice_onevar (vars[0], observed);
    } else if (vars.length == 2) {
      return this.slice_twovar (vars[0], vars[1], observed);
    } else {
      return this.slice_general (vars, observed);
    }
  }

  protected abstract Factor slice_onevar (Variable var, Assignment observed);

  protected abstract Factor slice_twovar (Variable v1, Variable v2, Assignment observed);

  protected abstract Factor slice_general (Variable[] vars, Assignment observed);

}
30,968
27.256387
132
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/CPT.java
/* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://mallet.cs.umass.edu/
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.types;

import java.util.Collection;

import cc.mallet.util.Randoms;

/**
 * A conditional probability table: a discrete factor tagged with a
 *  distinguished "child" variable; the remaining variables are the parents.
 *  All Factor operations are delegated to the underlying factor.
 * <p>
 * NOTE(review): duplicate(), multiply(), normalize(), etc. return the
 *  delegate's result, not a new CPT wrapper — confirm this is intended.
 *
 * $Id: CPT.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public class CPT implements DiscreteFactor {

  private DiscreteFactor subFactor;  // the underlying table holding all values
  private VarSet parents;            // all variables of subFactor except child
  private Variable child;

  /**
   * Wraps the given factor as a CPT with the given child variable.
   * @throws IllegalArgumentException if child is not a variable of subFactor
   */
  public CPT (DiscreteFactor subFactor, Variable child)
  {
    this.subFactor = subFactor;
    this.child = child;
    if (!subFactor.containsVar (child)) {
      throw new IllegalArgumentException ("Invalid child var for CPT\n Child: " + child + "\n Factor: " + subFactor);
    }
    parents = new HashVarSet (subFactor.varSet ());
    parents.remove (child);
  }

  public VarSet getParents () { return parents; }

  public Variable getChild () { return child; }

  /** Replaces the underlying factor.  The parents/child partition is NOT recomputed. */
  public void setSubFactor (DiscreteFactor subFactor) { this.subFactor = subFactor; }

  public String toString () { return "CPT: Child ["+child+"]\n Factor: "+subFactor.toString (); }

  // All remaining operations are straight delegations to subFactor.

  public double value (Assignment assn) {return subFactor.value (assn);}
  public double value (AssignmentIterator it) {return subFactor.value (it);}
  public Factor normalize () { return subFactor.normalize (); }
  public Factor marginalize (Variable[] vars) {return subFactor.marginalize (vars);}
  public Factor marginalize (Collection vars) {return subFactor.marginalize (vars);}
  public Factor marginalize (Variable var) {return subFactor.marginalize (var);}
  public Factor marginalizeOut (Variable var) {return subFactor.marginalizeOut (var);}
  public Factor extractMax (Collection vars) {return subFactor.extractMax (vars);}
  public Factor extractMax (Variable var) {return subFactor.extractMax (var);}
  public Factor extractMax (Variable[] vars) {return subFactor.extractMax (vars);}
  public int argmax () {return subFactor.argmax ();}
  public Assignment sample (Randoms r) {return subFactor.sample (r);}
  public double sum () {return subFactor.sum ();}
  public double entropy () {return subFactor.entropy ();}
  public Factor multiply (Factor dist) {return subFactor.multiply (dist);}
  public void multiplyBy (Factor pot) {subFactor.multiplyBy (pot);}
  public void exponentiate (double power) {subFactor.exponentiate (power);}
  public void divideBy (Factor pot) {subFactor.divideBy (pot);}
  public boolean containsVar (Variable var) {return subFactor.containsVar (var);}
  public VarSet varSet () {return subFactor.varSet ();}
  public AssignmentIterator assignmentIterator () {return subFactor.assignmentIterator ();}
  public boolean almostEquals (Factor p) {return subFactor.almostEquals (p);}
  public boolean almostEquals (Factor p, double epsilon) {return subFactor.almostEquals (p, epsilon);}
  public Factor duplicate () {return subFactor.duplicate ();}
  public boolean isNaN () {return subFactor.isNaN ();}
  public double logValue (AssignmentIterator it) {return subFactor.logValue (it);}
  public double logValue (Assignment assn) {return subFactor.logValue (assn);}
  public double logValue (int loc) {return subFactor.logValue (loc);}
  public Variable getVariable (int i) {return subFactor.getVariable (i);}
  public int sampleLocation (Randoms r) {return subFactor.sampleLocation (r);}
  public double value (int index) {return subFactor.value (index);}
  public int numLocations () {return subFactor.numLocations ();}
  public double valueAtLocation (int loc) {return subFactor.valueAtLocation (loc);}
  public int indexAtLocation (int loc) {return subFactor.indexAtLocation (loc);}
  public double[] toValueArray () {return subFactor.toValueArray ();}
  public int singleIndex (int[] smallDims) {return subFactor.singleIndex (smallDims);}

  public String dumpToString () { return subFactor.dumpToString (); }

  public Factor slice (Assignment assn) { return subFactor.slice (assn); }

  public AbstractTableFactor asTable () { return subFactor.asTable (); }

  public Factor marginalizeOut (VarSet varset) { return subFactor.marginalizeOut (varset); }

}
4,435
30.460993
119
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/BoltzmannPairFactor.java
/* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.types; import cc.mallet.util.Randoms; /** * A factor over a continuous variable theta and binary variables <tt>var</tt>. * such that <tt>phi(x|theta)<tt> is Potts. That is, for fixed theta, <tt>phi(x)</tt> = 1 * if all x are equal, and <tt>exp^{-theta}</tt> otherwise. * $Id: BoltzmannPairFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class BoltzmannPairFactor extends AbstractFactor implements ParameterizedFactor { private Variable sigma; private Variable x1; // The binary variable private Variable x2; // The binary variable private VarSet xs; public BoltzmannPairFactor (Variable x1, Variable x2, Variable sigma) { super (new HashVarSet (new Variable[] { sigma, x1, x2 })); this.sigma = sigma; this.x1 = x1; this.x2 = x2; xs = new HashVarSet (new Variable[] { x1, x2 }); if (x1.getNumOutcomes () != 2) { throw new IllegalArgumentException ("Discrete variable "+x1+" in BoltzmannUnary must be binary."); } if (x2.getNumOutcomes () != 2) { throw new IllegalArgumentException ("Discrete variable "+x2+" in BoltzmannUnary must be binary."); } if (!sigma.isContinuous ()) { throw new IllegalArgumentException ("Parameter "+sigma +" in BoltzmannUnary must be continuous."); } } protected Factor extractMaxInternal (VarSet varSet) { throw new UnsupportedOperationException (); } protected double lookupValueInternal (int i) { throw new UnsupportedOperationException (); } protected Factor marginalizeInternal (VarSet varsToKeep) { throw new UnsupportedOperationException (); } /* Inefficient, but this will seldom be called. 
*/ public double value (AssignmentIterator it) { Assignment assn = it.assignment(); Factor tbl = sliceForSigma (assn); return tbl.value (assn); } private Factor sliceForSigma (Assignment assn) { double sig = assn.getDouble (sigma); double[] vals = new double[] { Math.exp (-sig), 1, 1, 1 }; return new TableFactor (new Variable[] { x1, x2 }, vals); } public Factor normalize () { throw new UnsupportedOperationException (); } public Assignment sample (Randoms r) { throw new UnsupportedOperationException (); } public double logValue (AssignmentIterator it) { return Math.log (value (it)); } public Factor slice (Assignment assn) { Factor sigSlice = sliceForSigma (assn); // recursively slice, in case assn includes some of the xs return sigSlice.slice (assn); } public String dumpToString () { StringBuffer buf = new StringBuffer (); buf.append ("[Pair BM Factor: "); buf.append (x1); buf.append (" "); buf.append (x2); buf.append (" sigma="); buf.append (sigma); buf.append (" ]"); return buf.toString (); } public double sumGradLog (Factor q, Variable param, Assignment paramAssn) { if (param != sigma) throw new IllegalArgumentException (); Factor q_xs = q.marginalize (new Variable[] { x1, x2 }); Assignment assn = new Assignment (xs.toVariableArray (), new int[] { 0, 0 }); return - q_xs.value (assn); } public Factor duplicate () { return new BoltzmannPairFactor (x1, x2, sigma); } public boolean almostEquals (Factor p, double epsilon) { return equals (p); } public boolean isNaN () { return false; } public boolean equals (Object o) { if (this == o) return true; if (o == null || getClass () != o.getClass ()) return false; final BoltzmannPairFactor that = (BoltzmannPairFactor) o; if (sigma != null ? !sigma.equals (that.sigma) : that.sigma != null) return false; if (x1 != null ? !x1.equals (that.x1) : that.x1 != null) return false; if (x2 != null ? !x2.equals (that.x2) : that.x2 != null) return false; return true; } public int hashCode () { int result; result = (sigma != null ? 
sigma.hashCode () : 0); result = 29 * result + (x1 != null ? x1.hashCode () : 0); result = 29 * result + (x2 != null ? x2.hashCode () : 0); return result; } }
4,519
28.350649
106
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/AbstractInferencer.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference;

import cc.mallet.grmm.types.*;
import cc.mallet.grmm.util.Models;

/**
 * Abstract base class for inferencers.  This simply throws
 *  an UnsupportedOperationException for all methods, which
 *  is useful for subclasses that want to implement only
 *  specific inference functionality.
 *
 * Created: Mon Oct  6 17:01:21 2003
 *
 * @author <a href="mailto:[email protected]">Charles Sutton</a>
 * @version $Id: AbstractInferencer.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
abstract public class AbstractInferencer implements Inferencer, Cloneable {

  public abstract void computeMarginals (FactorGraph fg);

  /** Returns exp of lookupLogJoint; unsupported unless a subclass overrides lookupLogJoint. */
  public double lookupJoint (Assignment assn)
  {
    return Math.exp (lookupLogJoint (assn));
  }

  public double lookupLogJoint (Assignment assn)
  {
    throw new UnsupportedOperationException
            (this.getClass().getName()+" doesn't compute joint probabilities.");
  }

  /** Only singleton cliques are supported here; larger cliques require a subclass override. */
  public Factor lookupMarginal (VarSet c)
  {
    switch (c.size()) {
      case 1:
        return lookupMarginal (c.get (0));

      default:
        throw new UnsupportedOperationException
                (this.getClass().getName()+" doesn't compute marginals of arbitrary cliques.");
    }
  }

  // TODO: Make destructive...
  /**
   * Computes the joint probability of assn by the chain rule:
   *  repeatedly infers a single-variable marginal, multiplies it in, and
   *  conditions the model on that variable's assigned value.
   *  Note this runs inference once per variable, so it can be expensive.
   */
  public double query (FactorGraph mdl, Assignment assn)
  {
    // Computes joint of assignment using chain rule
    double marginal = 1.0;
    for (int i = 0; i < assn.size(); i++) {
      Variable var = assn.getVariable (i);
      computeMarginals (mdl);
      Factor ptl = lookupMarginal (var);
      marginal *= ptl.value (assn);
      // condition on var = assn(var) for the remaining variables
      mdl = Models.addEvidence (mdl, new Assignment (var, assn.get (var)));
    }
    return marginal;
  }

  abstract public Factor lookupMarginal(Variable variable);

  /** Returns a shallow copy via clone(); subclasses with mutable state may need to override. */
  public Inferencer duplicate ()
  {
    try {
      return (Inferencer) clone();
    } catch (CloneNotSupportedException e) {
      // cannot happen: this class implements Cloneable
      throw new RuntimeException (e);
    }
  }

  public void dump ()
  {
    throw new UnsupportedOperationException ();
  }

  public void reportTime ()
  {
    System.err.println ("AbstractInferencer: reportTime(): No report available.");
  }

  // Serialization garbage

  private static final long serialVersionUID = 1;

} // AbstractInferencer
2,653
28.164835
89
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/JunctionTreePropagation.java
/* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://mallet.cs.umass.edu/ This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.inference; import java.util.Collection; import java.util.Iterator; import java.util.logging.Logger; import java.util.logging.Level; import java.io.Serializable; import java.io.ObjectOutputStream; import java.io.IOException; import java.io.ObjectInputStream; import cc.mallet.grmm.types.Factor; import cc.mallet.grmm.types.HashVarSet; import cc.mallet.grmm.types.VarSet; import cc.mallet.grmm.types.Variable; import cc.mallet.util.MalletLogger; /** * An implementation of Hugin-style propagation for junction trees. * This destructively modifies the junction tree so that its clique potentials * are the true marginals of the underlying graph. * <p/> * End users will not usually need to use this class directly. Use * <tt>JunctionTreeInferencer</tt> instead. * <p/> * This class is not an instance of Inferencer because it destructively * modifies the junction tree, which the Inferencer methods do not do to * factor graphs. 
* <p/> * Created: Feb 1, 2006 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: JunctionTreePropagation.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $ */ class JunctionTreePropagation implements Serializable { private static Logger logger = MalletLogger.getLogger (JunctionTreePropagation.class.getName ()); transient private int totalMessagesSent = 0; private MessageStrategy strategy; public JunctionTreePropagation (MessageStrategy strategy) { this.strategy = strategy; } public static JunctionTreePropagation createSumProductInferencer () { return new JunctionTreePropagation (new SumProductMessageStrategy ()); } public static JunctionTreePropagation createMaxProductInferencer () { return new JunctionTreePropagation (new MaxProductMessageStrategy ()); } public int getTotalMessagesSent () { return totalMessagesSent; } public void computeMarginals (JunctionTree jt) { propagate (jt); jt.normalizeAll (); // Necessary if jt originally unnormalized } /* Hugin-style propagation for junction trees */ // bottom-up pass private void collectEvidence (JunctionTree jt, VarSet parent, VarSet child) { logger.finer ("collectEvidence " + parent + " --> " + child); for (Iterator it = jt.getChildren (child).iterator (); it.hasNext ();) { VarSet gchild = (VarSet) it.next (); collectEvidence (jt, child, gchild); } if (parent != null) { totalMessagesSent++; strategy.sendMessage (jt, child, parent); } } // top-down pass private void distributeEvidence (JunctionTree jt, VarSet parent) { for (Iterator it = jt.getChildren (parent).iterator (); it.hasNext ();) { VarSet child = (VarSet) it.next (); totalMessagesSent++; strategy.sendMessage (jt, parent, child); distributeEvidence (jt, child); } } private void propagate (JunctionTree jt) { VarSet root = (VarSet) jt.getRoot (); collectEvidence (jt, null, root); distributeEvidence (jt, root); } public Factor lookupMarginal (JunctionTree jt, VarSet varSet) { if (jt == null) { throw new IllegalStateException ("Call 
computeMarginals() first."); } VarSet parent = jt.findParentCluster (varSet); if (parent == null) { throw new UnsupportedOperationException ("No parent cluster in " + jt + " for clique " + varSet); } Factor cpf = jt.getCPF (parent); if (logger.isLoggable (Level.FINER)) { logger.finer ("Lookup jt marginal: clique " + varSet + " cluster " + parent); logger.finest (" cpf " + cpf); } Factor marginal = strategy.extractBelief (cpf, varSet); marginal.normalize (); return marginal; } public Factor lookupMarginal (JunctionTree jt, Variable var) { if (jt == null) { throw new IllegalStateException ("Call computeMarginals() first."); } VarSet parent = jt.findParentCluster (var); Factor cpf = jt.getCPF (parent); if (logger.isLoggable (Level.FINER)) { logger.finer ("Lookup jt marginal: var " + var + " cluster " + parent); logger.finest (" cpf " + cpf); } Factor marginal = strategy.extractBelief (cpf, new HashVarSet (new Variable[] { var })); marginal.normalize (); return marginal; } /////////////////////////////////////////////////////////////////////////// // MEESAGE STRATEGIES /////////////////////////////////////////////////////////////////////////// /** * Implements a strategy pattern for message sending. This allows sum-product * and max-product messages, e.g., to be different implementations of this strategy. */ public interface MessageStrategy { /** * Sends a message from the clique FROM to TO in a junction tree. */ public void sendMessage (JunctionTree jt, VarSet from, VarSet to); public Factor extractBelief (Factor cpf, VarSet varSet); } public static class SumProductMessageStrategy implements MessageStrategy, Serializable { /** * This sends a sum-product message, normalized to avoid * underflow. 
*/ public void sendMessage (JunctionTree jt, VarSet from, VarSet to) { Collection sepset = jt.getSepset (from, to); Factor fromCpf = jt.getCPF (from); Factor toCpf = jt.getCPF (to); Factor oldSepsetPot = jt.getSepsetPot (from, to); Factor lambda = fromCpf.marginalize (sepset); lambda.normalize (); jt.setSepsetPot (lambda, from, to); toCpf = toCpf.multiply (lambda); toCpf.divideBy (oldSepsetPot); toCpf.normalize (); jt.setCPF (to, toCpf); } public Factor extractBelief (Factor cpf, VarSet varSet) { return cpf.marginalize (varSet); } // Serialization private static final long serialVersionUID = 1; private static final int CUURENT_SERIAL_VERSION = 1; private void writeObject (ObjectOutputStream out) throws IOException { out.defaultWriteObject (); out.writeInt (CUURENT_SERIAL_VERSION); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject (); in.readInt (); // version } } public static class MaxProductMessageStrategy implements MessageStrategy, Serializable { /** * This sends a max-product message. 
*/ public void sendMessage (JunctionTree jt, VarSet from, VarSet to) { // System.err.println ("Send message "+from+" --> "+to); Collection sepset = jt.getSepset (from, to); Factor fromCpf = jt.getCPF (from); Factor toCpf = jt.getCPF (to); Factor oldSepsetPot = jt.getSepsetPot (from, to); Factor lambda = fromCpf.extractMax (sepset); lambda.normalize (); jt.setSepsetPot (lambda, from, to); toCpf = toCpf.multiply (lambda); toCpf.divideBy (oldSepsetPot); toCpf.normalize (); jt.setCPF (to, toCpf); } public Factor extractBelief (Factor cpf, VarSet varSet) { return cpf.extractMax (varSet); } // Serialization private static final long serialVersionUID = 1; private static final int CUURENT_SERIAL_VERSION = 1; private void writeObject (ObjectOutputStream out) throws IOException { out.defaultWriteObject (); out.writeInt (CUURENT_SERIAL_VERSION); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject (); in.readInt (); // version } } // Serialization private static final long serialVersionUID = 1; private static final int CUURENT_SERIAL_VERSION = 1; private void writeObject (ObjectOutputStream out) throws IOException { out.defaultWriteObject (); out.writeInt (CUURENT_SERIAL_VERSION); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject (); in.readInt (); // version } }
8,362
28.867857
99
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/GibbsSampler.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference;

//import edu.umass.cs.mallet.users.casutton.util.Timing;

import java.util.ArrayList;
import java.util.List;
import java.util.Iterator;

import cc.mallet.grmm.types.*;
import cc.mallet.util.Randoms;
import cc.mallet.util.Timing;

/**
 * Gibbs sampler for discrete factor graphs: starting from a feasible
 * (nonzero-probability) assignment, repeatedly resamples each variable
 * from its conditional distribution given all the others.
 *
 * Created: Mar 28, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: GibbsSampler.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
public class GibbsSampler implements Sampler {

  // Number of initial passes discarded before samples are collected.
  private int burnin;

  // NOTE(review): this field appears unused in this file.
  private Factor[] allCpts;

  // Random source; fixed default seed makes sampling reproducible by default.
  private Randoms r = new Randoms (324231);

  public GibbsSampler () {}

  public GibbsSampler (int burnin)
  {
    this.burnin = burnin;
  }

  public GibbsSampler (Randoms r, int burnin)
  {
    this.burnin = burnin;
    this.r = r;
  }

  public void setBurnin (int burnin)
  {
    this.burnin = burnin;
  }

  public void setRandom (Randoms r)
  {
    this.r = r;
  }

  /**
   * Draws N samples from the model, after discarding <tt>burnin</tt>
   * full Gibbs passes.  Each returned row is one complete assignment.
   *
   * @throws IllegalArgumentException if no feasible initial assignment exists
   */
  public Assignment sample (FactorGraph mdl, int N)
  {
//    initForGraph (mdl);
    Assignment assn = initialAssignment (mdl);
    if (assn == null)
      throw new IllegalArgumentException ("GibbsSampler: Could not find feasible assignment for model "+mdl);

    Timing timing = new Timing ();
    for (int i = 0; i < burnin; i++) {
      assn = doOnePass (mdl, assn);
    }
    timing.tick ("Burnin");

    Assignment ret = new Assignment ();
    for (int i = 0; i < N; i++) {
      assn = doOnePass (mdl, assn);
      ret.addRow (assn);
    }
    timing.tick ("Sampling");

    return ret;
  }

  private Assignment initialAssignment (FactorGraph mdl)
  {
    Assignment assn = new Assignment ();
    return initialAssignmentRec (mdl, assn, 0);
  }

  // backtracking search for a feasible assignment
  // Extends the partial assignment one factor at a time; a branch is pruned
  // when the factor's value under the partial assignment is (near) zero.
  // Returns null if no feasible completion exists from this partial assignment.
  private Assignment initialAssignmentRec (FactorGraph mdl, Assignment assn, int fi)
  {
    // all factors satisfied: the partial assignment is feasible
    if (fi >= mdl.factors ().size ()) return assn;

    Factor f = mdl.getFactor (fi);
    Factor sliced = f.slice (assn);

    // factor fully determined by assn already: just check it's nonzero
    if (sliced.varSet().isEmpty()) {
      double val = f.value (assn);
      if (val > 1e-50) {
        return initialAssignmentRec (mdl, assn, fi+1);
      } else {
        return null;
      }
    }

    // otherwise try each assignment of the factor's remaining variables
    for (AssignmentIterator it = sliced.assignmentIterator (); it.hasNext ();) {
      double val = sliced.value (it);
      if (val > 1e-50) {
        Assignment new_assn = Assignment.union (assn, it.assignment());
        Assignment assn_ret = initialAssignmentRec (mdl, new_assn, fi+1);
        if (assn_ret != null) return assn_ret;
      }
      it.advance ();
    }
    return null;
  }

  // One full Gibbs sweep: resample every variable in turn from its
  // conditional given the current values of all the others.
  // Assumes variable vidx of the model corresponds to entry vidx of the
  // assignment -- TODO confirm this ordering invariant.
  private Assignment doOnePass (FactorGraph mdl, Assignment initial)
  {
    Assignment ret = (Assignment) initial.duplicate ();
    for (int vidx = 0; vidx < ret.size (); vidx++) {
      Variable var = mdl.get (vidx);
      DiscreteFactor subcpt = constructConditionalCpt (mdl, var, ret);
      int value = subcpt.sampleLocation (r);
      ret.setValue (var, value);
    }
    return ret;
  }

  // Builds the (normalized, log-space) conditional distribution of var given
  // fullAssn, by summing the log-values of every factor that contains var.
  // Warning: destructively modifies ret's assignment to fullAssn (I could save and restore, but I don't care
  private DiscreteFactor constructConditionalCpt (FactorGraph mdl, Variable var, Assignment fullAssn)
  {
    List ptlList = mdl.allFactorsContaining (var);
    LogTableFactor ptl = new LogTableFactor (var);
    for (AssignmentIterator it = ptl.assignmentIterator (); it.hasNext(); it.advance ()) {
      Assignment varAssn = it.assignment ();
      // plug this candidate value of var into the full assignment...
      fullAssn.setValue (var, varAssn.get (var));
      // ...and score it against every factor touching var
      ptl.setRawValue (varAssn, sumValues (ptlList, fullAssn));
    }
    ptl.normalize ();
    return ptl;
  }

  // Sum of log-values of the given factors at assn (i.e., the log of the
  // unnormalized conditional probability).
  private double sumValues (List ptlList, Assignment assn)
  {
    double sum = 0;
    for (Iterator it = ptlList.iterator (); it.hasNext ();) {
      Factor ptl = (Factor) it.next ();
      sum += ptl.logValue (assn);
    }
    return sum;
  }
}
4,205
26.490196
110
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/JunctionTreeInferencer.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference;

import org._3pq.jgrapht.GraphHelper;
import org._3pq.jgrapht.UndirectedGraph;
import org._3pq.jgrapht.alg.ConnectivityInspector;
import org._3pq.jgrapht.graph.SimpleGraph;
import org._3pq.jgrapht.graph.ListenableUndirectedGraph;
import org._3pq.jgrapht.traverse.BreadthFirstIterator;

import cc.mallet.grmm.types.*;
import cc.mallet.grmm.util.Graphs;
import cc.mallet.types.Alphabet;
import cc.mallet.util.MalletLogger;

import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Does inference in general graphical models using
 * the Hugin junction tree algorithm.
 *
 * Created: Mon Nov 10 23:58:44 2003
 *
 * @author <a href="mailto:[email protected]">Charles Sutton</a>
 * @version $Id: JunctionTreeInferencer.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
public class JunctionTreeInferencer extends AbstractInferencer {

  private static Logger logger = MalletLogger.getLogger(JunctionTreeInferencer.class.getName());

  // True if the model's factors are LogTableFactors (so blank factors
  // created here should be in log space too).
  private boolean inLogSpace;

  // Performs the actual Hugin message passing; sum-product by default.
  private JunctionTreePropagation propagator;

  public JunctionTreeInferencer()
  {
    this (JunctionTreePropagation.createSumProductInferencer ());
  } // JunctionTreeInferencer constructor

  public JunctionTreeInferencer (JunctionTreePropagation propagator)
  {
    this.propagator = propagator;
  }

  /** Factory for an inferencer that computes max-marginals (for MAP). */
  public static JunctionTreeInferencer createForMaxProduct ()
  {
    return new JunctionTreeInferencer (JunctionTreePropagation.createMaxProductInferencer ());
  }

  private boolean isAdjacent (UndirectedGraph g, Variable v1, Variable v2)
  {
    return g.getEdge (v1, v2) != null;
  }

  // Junction tree built by the last call to buildJunctionTree / computeMarginals.
  transient protected JunctionTree jtCurrent;
  // Elimination cliques found during triangulation.
  transient private ArrayList cliques;

  /**
   * Returns the number of edges that would be added to a graph if a
   * given vertex would be removed in the triangulation procedure.
   * The return value is the number of edges in the elimination
   * clique of V that are not already present.
   */
  private int newEdgesRequired(UndirectedGraph mdl, Variable v)
  {
    int rating = 0;

    for (Iterator it1 = neighborsIterator (mdl,v); it1.hasNext();) {
      Variable neighbor1 = (Variable) it1.next();
      Iterator it2 = neighborsIterator (mdl,v);
      while (it2.hasNext()) {
        Variable neighbor2 = (Variable) it2.next();
        if (neighbor1 != neighbor2) {
          if (!isAdjacent (mdl, neighbor1, neighbor2)) {
            rating++;
          }
        }
      }
    }
//    System.out.println(v+" = "+rating);
    return rating;
  }

  /**
   * Returns the weight of the clique that would be added to a graph if a
   * given vertex would be removed in the triangulation procedure.
   * The return value is the number of edges in the elimination
   * clique of V that are not already present.
   */
  private int weightRequired (UndirectedGraph mdl, Variable v)
  {
    int rating = 1;

    // product of the cardinalities of v's neighbors
    for (Iterator it1 = neighborsIterator (mdl,v); it1.hasNext();) {
      Variable neighbor = (Variable) it1.next();
      rating *= neighbor.getNumOutcomes();
    }

//    System.out.println(v+" = "+rating);

    return rating;
  }

  // Completes the neighborhood of v into a clique (fill-in edges).
  private void connectNeighbors(UndirectedGraph mdl, Variable v)
  {
    for (Iterator it1 = neighborsIterator(mdl,v); it1.hasNext();) {
      Variable neighbor1 = (Variable) it1.next();
      Iterator it2 = neighborsIterator(mdl,v);
      while (it2.hasNext()) {
        Variable neighbor2 = (Variable) it2.next();
        if (neighbor1 != neighbor2) {
          if (!isAdjacent (mdl, neighbor1, neighbor2)) {
            try {
              mdl.addEdge(neighbor1, neighbor2);
            } catch (Exception e) {
              throw new RuntimeException(e);
            }
          }
        }
      }
    }
  }

  // xx should refactor into Collections.any (Coll, TObjectProc)
  /* Return true iff a clique in L strictly contains c. */
  private boolean findSuperClique(List l, VarSet c)
  {
    for (Iterator it = l.iterator(); it.hasNext();) {
      VarSet c2 = (VarSet) it.next();
      if (c2.containsAll(c)) {
        return true;
      }
    }
    return false;
  }

  // works like the obscure <=> operator in Perl.
  private static int cmp(int i1, int i2)
  {
    if (i1 < i2) {
      return -1;
    } else if (i1 > i2) {
      return 1;
    } else {
      return 0;
    }
  }

  // Min-fill heuristic with min-weight tie-breaking: prefer the vertex whose
  // elimination adds the fewest edges; among ties, the smallest clique weight.
  public Variable pickVertexToRemove (UndirectedGraph mdl, ArrayList lst)
  {
    Iterator it = lst.iterator();
    Variable best = (Variable) it.next();
    int bestVal1 = newEdgesRequired (mdl, best);
    int bestVal2 = weightRequired (mdl, best);

    while (it.hasNext()) {
      Variable v = (Variable) it.next();
      int val = newEdgesRequired (mdl, v);
      if (val < bestVal1) {
        best = v;
        bestVal1 = val;
        bestVal2 = weightRequired (mdl, v);
      } else if (val == bestVal1) {
        int val2 = weightRequired (mdl, v);
        if (val2 < bestVal2) {
          best = v;
          bestVal1 = val;
          bestVal2 = val2;
        }
      }
    }

    return best;
  }

  /**
   * Adds edges to graph until it is triangulated.
   */
  private void triangulate(final UndirectedGraph mdl)
  {
    UndirectedGraph mdl2 = dupGraph (mdl);
    ArrayList vars = new ArrayList(mdl.vertexSet());
    Alphabet varMap = makeVertexMap(vars);   // NOTE(review): result unused
    cliques = new ArrayList();

    // debug
    if (logger.isLoggable (Level.FINER)) {
      logger.finer ("Triangulating model: "+mdl);
      String ret = "";
      for (int i = 0; i < vars.size(); i++) {
        Variable next = (Variable) vars.get(i);
        ret += next.toString() + "\n"; // " (" + mdl.getIndex(next) + ")\n      ";
      }
      logger.finer(ret);
    }

    // repeatedly eliminate the best vertex, recording elimination cliques
    while (!vars.isEmpty()) {
      Variable v = (Variable) pickVertexToRemove (mdl2, vars);
      logger.finer("Triangulating vertex " + v);

      VarSet varSet = new BitVarSet (v.getUniverse (), GraphHelper.neighborListOf (mdl2, v));
      varSet.add(v);
      if (!findSuperClique(cliques, varSet)) {
        cliques.add(varSet);
        if (logger.isLoggable (Level.FINER)) {
          logger.finer ("  Elim clique " + varSet + " size " + varSet.size () + " weight " + varSet.weight ());
        }
      }

      // must remove V from graph first, because adding the edges
      //  will change the rating of other vertices

      connectNeighbors (mdl2, v);
      vars.remove(v);
      mdl2.removeVertex (v);
    }

    if (logger.isLoggable(Level.FINE)) {
      logger.fine("Triangulation done. Cliques are: ");
      int totSize = 0, totWeight = 0, maxSize = 0, maxWeight = 0;
      for (Iterator it = cliques.iterator(); it.hasNext();) {
        VarSet c = (VarSet) it.next();
        logger.finer(c.toString());
        totSize += c.size();
        maxSize = Math.max(c.size(), maxSize);
        totWeight += c.weight();
        maxWeight = Math.max(c.weight(), maxWeight);
      }
      double sz = cliques.size();
      logger.fine("Jt created " + sz + " cliques. Size: avg " + (totSize / sz)
                          + " max " + (maxSize) + " Weight: avg " + (totWeight / sz)
                          + " max " + (maxWeight));
    }
  }

  private Alphabet makeVertexMap(ArrayList vars)
  {
    Alphabet map = new Alphabet (vars.size (), Variable.class);
    map.lookupIndices(vars.toArray(), true);
    return map;
  }

  // Number of variables shared by the two cliques of a candidate sepset.
  private static int sepsetSize(BitVarSet[] pair)
  {
    assert pair.length == 2;
    return pair[0].intersectionSize(pair[1]);
  }

  // Sum of the weights of the two cliques of a candidate sepset.
  private static int sepsetCost(VarSet[] pair)
  {
    assert pair.length == 2;
    return pair[0].weight() + pair[1].weight();
  }

  // Given two pairs of cliques, returns -1 if the pair o1 should be
  //  added to the tree first.  We add pairs that have the largest
  //  mass (number of vertices in common) to ensure that the clique
  //  tree satifies the running intersection property.
  private static Comparator sepsetChooser = new Comparator() {
    public int compare(Object o1, Object o2)
    {
      if (o1 == o2) return 0;
      BitVarSet[] pair1 = (BitVarSet[]) o1;
      BitVarSet[] pair2 = (BitVarSet[]) o2;
      int size1 = sepsetSize(pair1);
      int size2 = sepsetSize(pair2);
      int retval = -cmp(size1, size2);
      if (retval == 0) {
        // Break ties by adding the sepset with the
        //  smallest cost (sum of weights of connected clusters)
        int cost1 = sepsetCost(pair1);
        int cost2 = sepsetCost(pair2);
        retval = cmp(cost1, cost2);

        // Still a tie? Break arbitrarily but consistently.
        if (retval == 0) {
          retval = cmp (o1.hashCode (), o2.hashCode ());
        }
      }
      return retval;
    }
  };

  // Converts an (acyclic, connected) undirected graph of cliques into a
  // JunctionTree rooted at an arbitrary vertex, via breadth-first traversal.
  private JunctionTree graphToJt (UndirectedGraph g)
  {
    JunctionTree jt = new JunctionTree (g.vertexSet ().size ());
    Object root = g.vertexSet ().iterator ().next ();

    jt.add (root);

    for (Iterator it1 = new BreadthFirstIterator (g, root); it1.hasNext ();) {
      Object v1 = it1.next ();
      for (Iterator it2 = GraphHelper.neighborListOf (g, v1).iterator (); it2.hasNext ();) {
        Object v2 = it2.next ();
        if (jt.getParent (v1) != v2) {
          jt.addNode (v1, v2);
        }
      }
    }

    return jt;
  }

  // Builds a maximum-spanning-tree over the cliques (Kruskal-style, edges
  // ordered by sepsetChooser), then converts the result into a JunctionTree.
  private JunctionTree buildJtStructure()
  {
    TreeSet pq = new TreeSet(sepsetChooser);
    // Initialize pq with all possible edges...
    for (Iterator it = cliques.iterator(); it.hasNext();) {
      BitVarSet c1 = (BitVarSet) it.next();
      for (Iterator it2 = cliques.iterator(); it2.hasNext();) {
        BitVarSet c2 = (BitVarSet) it2.next();
        if (c1 == c2) break;
        pq.add(new BitVarSet[]{c1, c2});
      }
    }

    // ...and add the edges to jt that come to the top of the queue
    //  and don't cause a cycle.
    // xxx OK, this sucks.  openjgraph doesn't allow adding
    //  disconnected edges to a tree, so what we'll do is create a
    //  Graph frist, then convert it to a Tree.
    ListenableUndirectedGraph g = new ListenableUndirectedGraph (new SimpleGraph ());

    // first add every clique to the graph
    for (Iterator it = cliques.iterator(); it.hasNext();) {
      VarSet c = (VarSet) it.next();
      g.addVertex (c);
    }

    ConnectivityInspector inspector = new ConnectivityInspector (g);
    g.addGraphListener (inspector);

    // then add n - 1 edges
    int numCliques = cliques.size();
    int edgesAdded = 0;
    while (edgesAdded < numCliques - 1) {
      VarSet[] pair = (VarSet[]) pq.first();
      pq.remove(pair);
      if (!inspector.pathExists(pair[0], pair[1])) {
        g.addEdge(pair[0], pair[1]);
        edgesAdded++;
      }
    }

    JunctionTree jt = graphToJt(g);
    if (logger.isLoggable (Level.FINER)) {
      logger.finer ("  jt structure was " + jt);
    }
    return jt;
  }

  // Initializes each cluster potential to 1, then multiplies every model
  // factor into some cluster that contains its variables.
  private void initJtCpts(FactorGraph mdl, JunctionTree jt)
  {
    for (Iterator it = jt.getVerticesIterator(); it.hasNext();) {
      VarSet c = (VarSet) it.next();
//      DiscreteFactor ptl = createBlankFactor (c);
//      jt.setCPF(c, ptl);
      jt.setCPF (c, new ConstantFactor (1.0));
    }

    for (Iterator it = mdl.factors ().iterator(); it.hasNext();) {
      Factor ptl = (Factor) it.next();
      VarSet parent = jt.findParentCluster(ptl.varSet());
      assert parent != null
              : "Unable to find parent cluster for ptl " + ptl + "in jt " + jt;

      Factor cpf = jt.getCPF(parent);
      Factor newCpf = cpf.multiply(ptl);
      jt.setCPF (parent, newCpf);

      /* debug
  if (jt.isNaN()) {
    throw new RuntimeException ("Got a NaN");
  }
*/
    }
  }

  private AbstractTableFactor createBlankFactor (VarSet c)
  {
    if (inLogSpace) {
      return new LogTableFactor (c);
    } else {
      return new TableFactor (c);
    }
  }

  public void computeMarginals (FactorGraph mdl)
  {
    inLogSpace = mdl.getFactor (0) instanceof LogTableFactor;
    buildJunctionTree(mdl);
    propagator.computeMarginals(jtCurrent);
    totalMessagesSent += propagator.getTotalMessagesSent();
  }

  public void computeMarginals (JunctionTree jt)
  {
    inLogSpace = false; //??
    jtCurrent = jt;
    propagator.computeMarginals(jtCurrent);
    totalMessagesSent += propagator.getTotalMessagesSent();
  }

  /**
   * Constructs a junction tree from a given factor graph.  Does not perform BP in the resulting
   * graph.  So this gives you the structure of a jnuction tree, but the factors don't correspond
   * to the true marginals unless you call BP yourself.
   * @param mdl Factor graph to compute JT for.
   */
  public JunctionTree buildJunctionTree(FactorGraph mdl)
  {
    // reuse a cached tree structure if the graph already has one
    jtCurrent = (JunctionTree) mdl.getInferenceCache(JunctionTreeInferencer.class);
    if (jtCurrent != null) {
      jtCurrent.clearCPFs();
    } else {
      /* The graph g is the topology of the MRF that corresponds to the factor graph mdl.
       * Essentially, this means that we triangulate factor graphs by converting to an MRF first.
       * I could have chosen to trianglualte the FactorGraph directly, but I didn't for historical reasons
       *  (I already had a version of triangulate() for MRFs, not bipartite factor graphs.)
       * Note that the call to mdlToGraph() is perfectly valid for FactorGraphs that are also DirectedModels,
       *  and has the effect of moralizing in that case.  */
      UndirectedGraph g = Graphs.mdlToGraph (mdl);
      triangulate (g);
      jtCurrent = buildJtStructure();
      mdl.setInferenceCache(JunctionTreeInferencer.class, jtCurrent);
    }

    initJtCpts(mdl, jtCurrent);
    return jtCurrent;
  }

  private UndirectedGraph dupGraph (UndirectedGraph original)
  {
    UndirectedGraph copy = new SimpleGraph ();
    GraphHelper.addGraph (copy, original);
    return copy;
  }

  public Factor lookupMarginal(Variable var)
  {
    return propagator.lookupMarginal (jtCurrent, var);
  }

  public Factor lookupMarginal(VarSet varSet)
  {
    return propagator.lookupMarginal (jtCurrent, varSet);
  }

  public double lookupLogJoint(Assignment assn)
  {
    return jtCurrent.lookupLogJoint(assn);
  }

  public double dumpLogJoint(Assignment assn)
  {
    return jtCurrent.dumpLogJoint(assn);
  }

  /**
   * Returns the JunctionTree computed from the last call to
   * {@link #computeMarginals}.  Caller must not modify return value.
   */
  public JunctionTree lookupJunctionTree ()
  {
    return jtCurrent;
  }

  private Iterator neighborsIterator (UndirectedGraph g, Variable v)
  {
    return GraphHelper.neighborListOf (g, v).iterator ();
  }

  public void dump ()
  {
    if (jtCurrent != null) {
      System.out.println("Current junction tree");
      jtCurrent.dump();
    } else {
      System.out.println("NO current junction tree");
    }
  }

  transient private int totalMessagesSent = 0;

  /**
   * Returns the total number of messages this inferencer has sent.
   */
  public int getTotalMessagesSent () { return totalMessagesSent; }

  // Serialization
  private static final long serialVersionUID = 1;

  // If seralization-incompatible changes are made to these classes,
  //  then smarts can be added to these methods for backward compatibility.
  private void writeObject (ObjectOutputStream out) throws IOException {
    out.defaultWriteObject ();
  }

  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject ();
  }

} // JunctionTreeInferencer
15,869
28.388889
111
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/SamplingInferencer.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.inference; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import cc.mallet.grmm.types.*; /** * Approximate inferencer for graphical models using sampling. * A general inference engine that takes any Sampler engine, and performs * approximate inference using its samples. * Created: Mar 28, 2005 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: SamplingInferencer.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $ */ public class SamplingInferencer extends AbstractInferencer { private int N; private Sampler sampler; // Could save only sufficient statistics to save on memory transient Assignment samples; public SamplingInferencer (Sampler sampler, int n) { this.sampler = sampler; N = n; } public void computeMarginals (FactorGraph mdl) { samples = sampler.sample (mdl, N); } public Factor lookupMarginal (Variable var) { return lookupMarginal (new HashVarSet (new Variable[] { var })); } // don't try this for large cliques public Factor lookupMarginal (VarSet varSet) { Factor mrgl = samples.marginalize (varSet); AbstractTableFactor tbl = mrgl.asTable (); tbl.normalize (); return tbl; } // Serialization garbage private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeInt (N); out.writeObject (sampler); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { in.readInt (); // read 
version N = in.readInt (); sampler = (Sampler) in.readObject (); } public String toString () { return "(SamplingInferencer: "+sampler+" )"; } }
2,312
26.535714
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/TreeBP.java
/* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://mallet.cs.umass.edu/ This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.inference; import gnu.trove.THashSet; import java.util.Iterator; import cc.mallet.grmm.types.Factor; import cc.mallet.grmm.types.FactorGraph; import cc.mallet.grmm.types.Variable; /** * Implements the tree-based schedule of belief propagation for exact inference * in trees. Can be used either for sum-product or max-product. * <p> * <p> * Do not use the * Created: Feb 1, 2006 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: TreeBP.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $ */ public class TreeBP extends AbstractBeliefPropagation { transient private THashSet marked; transient private Variable root; public static TreeBP createForMaxProduct () { return (TreeBP) new TreeBP ().setMessager (new MaxProductMessageStrategy ()); } public void computeMarginals (FactorGraph fg) { initForGraph (fg); marked = new THashSet (); lambdaPropagation (fg, null, root); marked = new THashSet (); piPropagation (fg, root); } protected void initForGraph (FactorGraph fg) { super.initForGraph (fg); // Pick a root arbitrarily root = (Variable) fg.variablesIterator ().next (); } private void lambdaPropagation (FactorGraph mdl, Factor parent, Variable child) { logger.finer ("lambda propagation "+parent+" , "+child); marked.add (child); for (Iterator it = mdl.allFactorsContaining (child).iterator(); it.hasNext();) { Factor gchild = (Factor) it.next(); if (!marked.contains (gchild)) { lambdaPropagation (mdl, child, gchild); } } if (parent != null) { // sendLambdaMessage (mdl, child, parent); sendMessage (mdl, child, parent); } } private void 
lambdaPropagation (FactorGraph mdl, Variable parent, Factor child) { logger.finer ("lambda propagation "+parent+" , "+child); marked.add (child); for (Iterator it = child.varSet ().iterator(); it.hasNext();) { Variable gchild = (Variable) it.next(); if (!marked.contains (gchild)) { lambdaPropagation (mdl, child, gchild); } } if (parent != null) { // sendLambdaMessage (mdl, child, parent); sendMessage (mdl, child, parent); } } private void piPropagation (FactorGraph mdl, Variable var) { logger.finer ("Pi propagation from "+var); marked.add (var); for (Iterator it = mdl.allFactorsContaining (var).iterator(); it.hasNext();) { Factor child = (Factor) it.next(); if (!marked.contains (child)) { // sendPiMessage (mdl, var, child); sendMessage (mdl, var, child); piPropagation (mdl, child); } } } private void piPropagation (FactorGraph mdl, Factor factor) { logger.finer ("Pi propagation from "+factor); marked.add (factor); for (Iterator it = factor.varSet ().iterator(); it.hasNext();) { Variable child = (Variable) it.next(); if (!marked.contains (child)) { // sendPiMessage (mdl, var, child); sendMessage (mdl, factor, child); piPropagation (mdl, child); } } } }
3,500
29.443478
84
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/Utils.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.inference; import java.util.Iterator; import cc.mallet.grmm.types.*; import cc.mallet.types.MatrixOps; import gnu.trove.THashSet; /** * A bunch of static utilities useful for dealing with Inferencers. * Created: Jun 1, 2005 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: Utils.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $ */ public class Utils { /** * Returns ths value of -log Z in mdl according to the given inferencer. * If inf is exact, the answer will be exact; otherwise the answer will be * approximation * * @param mdl * @param inf An inferencer. <tt>inf.computeMarginals (mdl)</tt> must already have * been called. 
* @return The value of -logZ */ public static double lookupMinusLogZ (FactorGraph mdl, Inferencer inf) { Assignment assn = new Assignment (mdl, new int[mdl.numVariables ()]); double prob = inf.lookupLogJoint (assn); double energy = mdl.logValue (assn); return prob - energy; } public static double localMagnetization (Inferencer inferencer, Variable var) { if (var.getNumOutcomes () != 2) throw new IllegalArgumentException (); Factor marg = inferencer.lookupMarginal (var); AssignmentIterator it = marg.assignmentIterator (); double v1 = marg.value (it); it.advance (); double v2 = marg.value (it); return v1 - v2; } public static double[] allL1MarginalDistance (FactorGraph mdl, Inferencer inf1, Inferencer inf2) { double[] dist = new double [mdl.numVariables ()]; int i = 0; for (Iterator it = mdl.variablesIterator (); it.hasNext();) { Variable var = (Variable) it.next (); Factor bel1 = inf1.lookupMarginal (var); Factor bel2 = inf2.lookupMarginal (var); dist[i++] = Factors.oneDistance (bel1, bel2); } return dist; } public static double avgL1MarginalDistance (FactorGraph mdl, Inferencer inf1, Inferencer inf2) { double[] dist = allL1MarginalDistance (mdl, inf1, inf2); return MatrixOps.mean (dist); } public static double maxL1MarginalDistance (FactorGraph mdl, Inferencer inf1, Inferencer inf2) { double[] dist = allL1MarginalDistance (mdl, inf1, inf2); return MatrixOps.max (dist); } public static int[] toSizesArray (Variable[] vars) { int[] szs = new int [vars.length]; for (int i = 0; i < vars.length; i++) { szs[i] = vars[i].getNumOutcomes (); } return szs; } public static VarSet defaultIntersection (VarSet v1, VarSet v2) {// Grossly inefficient implementation THashSet hset = new THashSet (v1); hset.retainAll (v2); Variable[] ret = new Variable [hset.size ()]; int vai = 0; for (int vi = 0; vi < v1.size(); vi++) { Variable var = v1.get (vi); if (hset.contains (var)) { ret[vai++] = var; } } return new HashVarSet (ret); } }
3,327
30.102804
98
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/RandomGraphs.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.inference; import java.util.*; import cc.mallet.grmm.types.*; import cc.mallet.types.Dirichlet; import cc.mallet.types.Multinomial; /** * Utility class for generating many useful kinds of random graphical * models. * Created: Mar 26, 2005 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: RandomGraphs.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $ */ public class RandomGraphs { public static double[] generateAttractivePotentialValues (Random r, double edgeWeight) { double b = Math.abs (r.nextGaussian ()) * edgeWeight; double eB = Math.exp (b); double eMinusB = Math.exp (-b); return new double[] { eB, eMinusB, eMinusB, eB }; } public static double[] generateMixedPotentialValues (Random r, double edgeWeight) { double b = r.nextGaussian () * edgeWeight; double eB = Math.exp (b); double eMinusB = Math.exp (-b); return new double[] { eB, eMinusB, eMinusB, eB }; } /** * Constructs a square grid of a given size with random attractive potentials. * Graphs are generated as follows: * <p> * We use a spin (i.e., {-1, 1}) representation. For each edge st, * a single edge weight <tt>w_st</tt> is generated uniformly in (0,d). * Then exponential parameters for the BM representation are chosen by * <pre> * theta_st = 4 * w_st * theta_s = 2 (\sum(t in N(s)) w_st) * </pre> * * @param size The length on one edge of the grid. * @param edgeWeight A positive number giving the maximum potential strength * @param r Object for generating random numbers. * @return A randomly-generated undirected model. 
*/ public static UndirectedGrid randomAttractiveGrid (int size, double edgeWeight, Random r) { UndirectedGrid mdl = new UndirectedGrid (size, size, 2); // Do grid from top left down.... for (int i = 0; i < size-1; i++) { for (int j = 0; j < size-1; j++) { Variable v = mdl.get (i, j); Variable vRight = mdl.get (i + 1, j); Variable vDown = mdl.get (i, j + 1); mdl.addFactor (v, vRight, generateAttractivePotentialValues (r, edgeWeight)); mdl.addFactor (v, vDown, generateAttractivePotentialValues (r, edgeWeight)); } } // and bottom edge for (int i = 0; i < size-1; i++) { Variable v = mdl.get (i, size - 1); Variable vRight = mdl.get (i + 1, size - 1); mdl.addFactor (v, vRight, generateAttractivePotentialValues (r, edgeWeight)); } // and finally right edge for (int i = 0; i < size-1; i++) { Variable v = mdl.get (size - 1, i); Variable vDown = mdl.get (size - 1, i + 1); mdl.addFactor (v, vDown, generateAttractivePotentialValues (r, edgeWeight)); } // and node potentials for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { double a = r.nextGaussian () * 0.0625; double[] b = new double[] { Math.exp (a), Math.exp (-a) }; TableFactor ptl = new TableFactor (mdl.get (i, j), b); mdl.addFactor (ptl); } } return mdl; } /** * Constructs a square grid of a given size with random repulsive potentials. * This means that if a node takes on a value, its neighbors are more likely * to take opposite values. * Graphs are generated as follows: * <p> * We use a spin (i.e., {-1, 1}) representation. For each edge st, * a single edge weight <tt>w_st</tt> is generated uniformly in (0,d). * Then exponential parameters for the BM representation are chosen by * <pre> * theta_st = 4 * w_st * theta_s = 2 (\sum(t in N(s)) w_st) * </pre> * * @param size The length on one edge of the grid. * @param edgeWeight A positive number giving the maximum ansolute potential strength * @param r Object for generating random numbers. * @return A randomly-generated undirected model. 
*/ public static UndirectedGrid randomRepulsiveGrid (int size, double edgeWeight, Random r) { return randomAttractiveGrid (size, -edgeWeight, r); } /** * Constructs a square grid of a given size with random frustrated potentials. * This means that some potentials will be attractive (want to make their * neighbors more like them) and some will be repulsive (want to make their * neighbors different). * Graphs are generated as follows: * <p> * We use a spin (i.e., {-1, 1}) representation. For each edge st, * a single edge weight <tt>w_st</tt> is generated uniformly in (0,d). * Then exponential parameters for the BM representation are chosen by * <pre> * theta_st = 4 * w_st * theta_s = 2 (\sum(t in N(s)) w_st) * </pre> * * @param size The length on one edge of the grid. * @param edgeWeight A positive number giving the maximum potential strength * @param r Object for generating random numbers. * @return A randomly-generated undirected model. */ public static UndirectedGrid randomFrustratedGrid (int size, double edgeWeight, Random r) { UndirectedGrid mdl = new UndirectedGrid (size, size, 2); // Do grid from top left down.... 
for (int i = 0; i < size-1; i++) { for (int j = 0; j < size-1; j++) { Variable v = mdl.get(i,j); Variable vRight = mdl.get(i+1,j); Variable vDown = mdl.get(i,j+1); mdl.addFactor (v, vRight, generateMixedPotentialValues (r, edgeWeight)); mdl.addFactor (v, vDown, generateMixedPotentialValues (r, edgeWeight)); } } // and bottom edge for (int i = 0; i < size-1; i++) { Variable v = mdl.get (i, size - 1); Variable vRight = mdl.get (i + 1, size - 1); mdl.addFactor (v, vRight, generateMixedPotentialValues (r, edgeWeight)); } // and finally right edge for (int i = 0; i < size-1; i++) { Variable v = mdl.get (size - 1, i); Variable vDown = mdl.get (size - 1, i + 1); mdl.addFactor (v, vDown, generateMixedPotentialValues (r, edgeWeight)); } // and node potentials addRandomNodePotentials (r, mdl); return mdl; } public static UndirectedModel randomFrustratedTree (int size, int maxChildren, double edgeWeight, Random r) { UndirectedModel mdl = new UndirectedModel (); List leaves = new ArrayList (); Variable root = new Variable (2); leaves.add (root); while (mdl.numVariables () < size) { Variable parent = (Variable) removeRandomElement (leaves, r); int numChildren = r.nextInt (maxChildren) + 1; for (int ci = 0; ci < numChildren; ci++) { Variable child = new Variable (2); double[] vals = generateMixedPotentialValues (r, edgeWeight); mdl.addFactor (parent, child, vals); leaves.add (child); } } addRandomNodePotentials (r, mdl); return mdl; } private static Object removeRandomElement (List l, Random r) { int idx = r.nextInt (l.size ()); Object obj = l.get (idx); l.remove (idx); return obj; } public static void addRandomNodePotentials (Random r, FactorGraph mdl) { int size = mdl.numVariables (); for (int i = 0; i < size; i++) { Variable var = mdl.get (i); TableFactor ptl = randomNodePotential (r, var); mdl.addFactor (ptl); } } public static TableFactor randomNodePotential (Random r, Variable var) { double a = r.nextGaussian (); double[] b = new double[] { Math.exp(a), Math.exp(-a) }; 
TableFactor ptl = new TableFactor (var, b); return ptl; } public static FactorGraph createUniformChain (int length) { Variable[] vars = new Variable[length]; for (int i = 0; i < length; i++) vars[i] = new Variable (2); FactorGraph mdl = new UndirectedModel (vars); for (int i = 0; i < length - 1; i++) { double[] probs = new double[4]; Arrays.fill (probs, 1.0); mdl.addFactor (vars[i], vars[i + 1], probs); } return mdl; } public static FactorGraph createUniformGrid (int length) { return createGrid (new UniformFactorGenerator (), length); } public static FactorGraph createRandomChain (cc.mallet.util.Randoms r, int length) { Variable[] vars = new Variable[length]; for (int i = 0; i < length; i++) vars[i] = new Variable (2); Dirichlet dirichlet = new Dirichlet (new double[] { 1, 1, 1, 1 }); FactorGraph mdl = new FactorGraph (vars); for (int i = 0; i < length - 1; i++) { Multinomial m = dirichlet.randomMultinomial (r); double[] probs = m.getValues (); mdl.addFactor (vars[i], vars[i + 1], probs); } return mdl; } public static interface FactorGenerator { Factor nextFactor (VarSet vars); } public static class UniformFactorGenerator implements FactorGenerator { public Factor nextFactor (VarSet vars) { double[] probs = new double [vars.weight ()]; Arrays.fill (probs, 1.0); return new TableFactor (vars, probs); } } public static UndirectedModel createGrid (FactorGenerator gener, int size) { UndirectedGrid grid = new UndirectedGrid (size, size, 2); for (int x = 0; x < size; x++) { for (int y = 0; y < size - 1; y++) { Variable v1 = grid.get(x, y); Variable v2 = grid.get(x, y+1); VarSet vars = new HashVarSet (new Variable[] { v1, v2 }); Factor factor = gener.nextFactor (vars); grid.addFactor (factor); } } // add left-right edges for (int x = 0; x < size - 1; x++) { for (int y = 0; y < size; y++) { Variable v1 = grid.get(x, y); Variable v2 = grid.get(x+1,y); VarSet vars = new HashVarSet (new Variable[] { v1, v2 }); Factor factor = gener.nextFactor (vars); grid.addFactor (factor); } 
} return grid; } public static FactorGraph createGridWithObs (FactorGenerator gridGener, FactorGenerator obsGener, int size) { List allVars = new ArrayList (2 * size * size); Variable[][] gridVars = new Variable[size][size]; Variable[][] obsVars = new Variable[size][size]; for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { gridVars[i][j] = new Variable (2); gridVars[i][j].setLabel ("GRID["+i+"]["+j+"]"); obsVars[i][j] = new Variable (2); obsVars[i][j].setLabel ("OBS["+i+"]["+j+"]"); allVars.add (gridVars[i][j]); allVars.add (obsVars[i][j]); } } FactorGraph mdl = new FactorGraph ((Variable[]) allVars.toArray (new Variable[0])); // add grid edges for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { Variable var0 = gridVars[i][j]; if (i < size-1) { Variable varR = gridVars[i + 1][j]; HashVarSet clique = new HashVarSet (new Variable[] { var0, varR }); Factor factor = gridGener.nextFactor (clique); mdl.addFactor (factor); } if (j < size-1) { Variable varD = gridVars[i][j + 1]; HashVarSet clique = new HashVarSet (new Variable[] { var0, varD }); Factor factor = gridGener.nextFactor (clique); mdl.addFactor (factor); } } } // add obs edges for (int i = 0; i < size; i++) { for (int j = 0; j < size; j++) { Variable gridVar = gridVars[i][j]; Variable obsVar = obsVars[i][j]; HashVarSet clique = new HashVarSet (new Variable[] { gridVar, obsVar }); Factor factor = obsGener.nextFactor (clique); mdl.addFactor (factor); } } return mdl; } }
12,009
31.814208
109
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/BruteForceInferencer.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.inference; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.util.Iterator; import cc.mallet.grmm.inference.AbstractInferencer; import cc.mallet.grmm.types.*; /** * Computes the joint of a GraphicalModel by brute-force * calculation. This is exponentially slow, so it is mostly * useful as a sanity check on more complicated algorithms. * * Created: Wed Sep 17 13:21:13 2003 * * @author <a href="mailto:[email protected]">Charles Sutton</a> * @version $Id: BruteForceInferencer.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $ */ public class BruteForceInferencer extends AbstractInferencer implements Inferencer { transient Factor cachedJoint; public Factor joint (FactorGraph model) { Factor joint = TableFactor.multiplyAll (model.factors ()); joint.normalize(); return joint; } public Factor joint (JunctionTree jt) { Factor joint = TableFactor.multiplyAll (jt.clusterPotentials ()); for (Iterator it = jt.sepsetPotentials().iterator(); it.hasNext();) { TableFactor pot = (TableFactor) it.next(); joint.divideBy (pot); } joint.normalize(); return joint; } public void computeMarginals (FactorGraph mdl) { cachedJoint = joint (mdl); } public void computeMarginals (JunctionTree jt) { cachedJoint = joint (jt); } public Factor lookupMarginal (Variable var) { return cachedJoint.marginalize (var); } public Factor lookupMarginal (VarSet c) { return cachedJoint.marginalize (c); } public double lookupJoint (Assignment assn) { return cachedJoint.value (assn); } public double lookupLogJoint (Assignment assn) { return 
Math.log (cachedJoint.value (assn)); } // Serialization private static final long serialVersionUID = 1; // If seralization-incompatible changes are made to these classes, // then smarts can be added to these methods for backward compatibility. private void writeObject (ObjectOutputStream out) throws IOException { out.defaultWriteObject (); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject (); } }
2,627
25.28
93
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/TRP.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.grmm.inference; import gnu.trove.THashSet; import gnu.trove.THashMap; import gnu.trove.TIntObjectHashMap; import java.util.logging.Logger; import java.util.logging.Level; import java.util.*; import java.io.*; import org._3pq.jgrapht.UndirectedGraph; import org._3pq.jgrapht.Graph; import org._3pq.jgrapht.Edge; import org._3pq.jgrapht.traverse.BreadthFirstIterator; import org._3pq.jgrapht.graph.SimpleGraph; import org.jdom.Document; import org.jdom.JDOMException; import org.jdom.Element; import org.jdom.input.SAXBuilder; import cc.mallet.grmm.types.*; import cc.mallet.util.MalletLogger; /** * Implementation of Wainwright's TRP schedule for loopy BP * in general graphical models. * * @author Charles Sutton * @version $Id: TRP.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $ */ public class TRP extends AbstractBeliefPropagation { private static Logger logger = MalletLogger.getLogger (TRP.class.getName ()); private static final boolean reportSpanningTrees = false; private TreeFactory factory; private TerminationCondition terminator; private Random random = new Random (); /* Make sure that we've included all edges before we terminate. 
*/ transient private TIntObjectHashMap factorTouched; transient private boolean hasConverged; transient private File verboseOutputDirectory = null; public TRP () { this (null, null); } public TRP (TreeFactory f) { this (f, null); } public TRP (TerminationCondition cond) { this (null, cond); } public TRP (TreeFactory f, TerminationCondition cond) { factory = f; terminator = cond; } public static TRP createForMaxProduct () { TRP trp = new TRP (); trp.setMessager (new MaxProductMessageStrategy ()); return trp; } // Accessors public TRP setTerminator (TerminationCondition cond) { terminator = cond; return this; } public TRP setFactory (TreeFactory factory) { this.factory = factory; return this; } // xxx should this be static? public void setRandomSeed (long seed) { random = new Random (seed); } public void setVerboseOutputDirectory (File verboseOutputDirectory) { this.verboseOutputDirectory = verboseOutputDirectory; } public boolean isConverged () { return hasConverged; } protected void initForGraph (FactorGraph m) { super.initForGraph (m); int numNodes = m.numVariables (); factorTouched = new TIntObjectHashMap (numNodes); hasConverged = false; if (factory == null) { factory = new AlmostRandomTreeFactory (); } if (terminator == null) { terminator = new DefaultConvergenceTerminator (); } else { terminator.reset (); } } private static cc.mallet.grmm.types.Tree graphToTree (Graph g) throws Exception { // Perhaps handle gracefully?? 
-cas if (g.vertexSet ().size () <= 0) { throw new RuntimeException ("Empty graph."); } Tree tree = new cc.mallet.grmm.types.Tree (); Object root = g.vertexSet ().iterator ().next (); tree.add (root); for (Iterator it1 = new BreadthFirstIterator (g, root); it1.hasNext();) { Object v1 = it1.next (); for (Iterator it2 = g.edgesOf (v1).iterator (); it2.hasNext ();) { Edge edge = (Edge) it2.next (); Object v2 = edge.oppositeVertex (v1); if (tree.getParent (v1) != v2) { tree.addNode (v1, v2); assert tree.getParent (v2) == v1; } } } return tree; } /** * Interface for tree-generation strategies for TRP. * <p/> * TRP works by repeatedly doing exact inference over spanning tree * of the original graph. But the trees can be chosen arbitrarily. * In fact, they don't need to be spanning trees; any acyclic * substructure will do. Users of TRP can tell it which strategy * to use by passing in an implementation of TreeFactory. */ public interface TreeFactory extends Serializable { public cc.mallet.grmm.types.Tree nextTree (FactorGraph mdl); } // This works around what appears to be a bug in OpenJGraph // connected sets. 
private static class SimpleUnionFind { private Map obj2set = new THashMap (); private Set findSet (Object obj) { Set container = (Set) obj2set.get (obj); if (container != null) { return container; } else { Set newSet = new THashSet (); newSet.add (obj); obj2set.put (obj, newSet); return newSet; } } private void union (Object obj1, Object obj2) { Set set1 = findSet (obj1); Set set2 = findSet (obj2); set1.addAll (set2); for (Iterator it = set2.iterator (); it.hasNext ();) { Object obj = it.next (); obj2set.put (obj, set1); } } public boolean noPairConnected (VarSet varSet) { for (int i = 0; i < varSet.size (); i++) { for (int j = i + 1; j < varSet.size (); j++) { Variable v1 = varSet.get (i); Variable v2 = varSet.get (j); if (findSet (v1) == findSet (v2)) { return false; } } } return true; } public void unionAll (Factor factor) { VarSet varSet = factor.varSet (); for (int i = 0; i < varSet.size (); i++) { Variable var = varSet.get (i); union (var, factor); } } } /** * Always adds edges that have not been touched, after that * adds random edges. 
*/ public class AlmostRandomTreeFactory implements TreeFactory { public Tree nextTree (FactorGraph fullGraph) { SimpleUnionFind unionFind = new SimpleUnionFind (); ArrayList edges = new ArrayList (fullGraph.factors ()); ArrayList goodEdges = new ArrayList (fullGraph.numVariables ()); Collections.shuffle (edges, random); // First add all edges that haven't been used so far try { for (Iterator it = edges.iterator (); it.hasNext ();) { Factor factor = (Factor) it.next (); VarSet varSet = factor.varSet (); if (!isFactorTouched (factor) && unionFind.noPairConnected (varSet)) { goodEdges.add (factor); unionFind.unionAll (factor); it.remove (); } } // Now add as many other edges as possible for (Iterator it = edges.iterator (); it.hasNext ();) { Factor factor = (Factor) it.next (); VarSet varSet = factor.varSet (); if (unionFind.noPairConnected (varSet)) { goodEdges.add (factor); unionFind.unionAll (factor); } } for (Iterator it = goodEdges.iterator (); it.hasNext ();) { Factor factor = (Factor) it.next (); touchFactor (factor); } UndirectedGraph g = new SimpleGraph (); for (Iterator it = fullGraph.variablesIterator (); it.hasNext ();) { Variable var = (Variable) it.next (); g.addVertex (var); } for (Iterator it = goodEdges.iterator (); it.hasNext ();) { Factor factor = (Factor) it.next (); g.addVertex (factor); for (Iterator vit = factor.varSet ().iterator (); vit.hasNext ();) { Variable var = (Variable) vit.next (); g.addEdge (factor, var); } } Tree tree = graphToTree (g); if (reportSpanningTrees) { System.out.println ("********* SPANNING TREE *************"); System.out.println (tree.dumpToString ()); System.out.println ("********* END TREE *************"); } return tree; } catch (Exception e) { e.printStackTrace (); throw new RuntimeException (e); } } private static final long serialVersionUID = -7461763414516915264L; } /** * Generates spanning trees cyclically from a predefined collection. 
*/ static public class TreeListFactory implements TreeFactory { private List lst; private Iterator it; public TreeListFactory (List l) { lst = l; it = lst.iterator (); } public TreeListFactory (cc.mallet.grmm.types.Tree[] arr) { lst = new ArrayList (java.util.Arrays.asList (arr)); it = lst.iterator (); } public static TreeListFactory makeFromReaders (FactorGraph fg, List readerList) { List treeList = new ArrayList (); for (Iterator it = readerList.iterator (); it.hasNext ();) { try { Reader reader = (Reader) it.next (); Document doc = new SAXBuilder ().build (reader); Element treeElt = doc.getRootElement (); Element rootElt = (Element) treeElt.getChildren ().get (0); Tree tree = readTreeRec (fg, rootElt); System.out.println (tree.dumpToString ()); treeList.add (tree); } catch (JDOMException e) { throw new RuntimeException (e); } catch (IOException e) { throw new RuntimeException (e); } } return new TreeListFactory (treeList); } /** @param fileList List of File objects. Each file should be an XML document describing a tree. */ public static TreeListFactory readFromFiles (FactorGraph fg, List fileList) { List treeList = new ArrayList (); for (Iterator it = fileList.iterator (); it.hasNext ();) { try { File treeFile = (File) it.next (); Document doc = new SAXBuilder ().build (treeFile); Element treeElt = doc.getRootElement (); Element rootElt = (Element) treeElt.getChildren ().get (0); treeList. 
add (readTreeRec (fg, rootElt)); } catch (JDOMException e) { throw new RuntimeException (e); } catch (IOException e) { throw new RuntimeException (e); } } return new TreeListFactory (treeList); } private static Tree readTreeRec (FactorGraph fg, Element elt) { List subtrees = new ArrayList (); for (Iterator it = elt.getChildren ().iterator (); it.hasNext ();) { Element child = (Element) it.next (); Tree subtree = readTreeRec (fg, child); subtrees.add (subtree); } Object parent = objFromElt (fg, elt); return Tree.makeFromSubtree (parent, subtrees); } private static Object objFromElt (FactorGraph fg, Element elt) { String type = elt.getName (); if (type.equals ("VAR")) { String vname = elt.getAttributeValue ("NAME"); return fg.findVariable (vname); } else if (type.equals("FACTOR")) { String varSetStr = elt.getAttributeValue ("VARS"); String[] vnames = varSetStr.split ("\\s+"); Variable[] vars = new Variable [vnames.length]; for (int i = 0; i < vnames.length; i++) { vars[i] = fg.findVariable (vnames[i]); } return fg.factorOf (new HashVarSet (vars)); } else { throw new RuntimeException ("Can't figure out element "+elt); } } public cc.mallet.grmm.types.Tree nextTree (FactorGraph mdl) { // If no more trees, rewind. if (!it.hasNext ()) { it = lst.iterator (); } return (cc.mallet.grmm.types.Tree) it.next (); } } // Termination conditions // will this need to be subclassed from outside? Will such // subclasses need access to the private state of TRP? static public interface TerminationCondition extends Cloneable, Serializable { // This takes the instances of trp as a parameter so that if a // TRP instance is cloned, and the terminator copied over, it // will still work. 
public boolean shouldContinue (TRP trp); public void reset (); // boy do I hate Java cloning public Object clone () throws CloneNotSupportedException; } static public class IterationTerminator implements TerminationCondition { int current; int max; public void reset () { current = 0; } public IterationTerminator (int m) { max = m; reset (); } public boolean shouldContinue (TRP trp) { current++; if (current >= max) { logger.finest ("***TRP quitting: Iteration " + current + " >= " + max); } return current <= max; } public Object clone () throws CloneNotSupportedException { return super.clone (); } } //xxx Delta is currently ignored. public static class ConvergenceTerminator implements TerminationCondition { double delta = 0.01; public ConvergenceTerminator () {} public ConvergenceTerminator (double delta) { this.delta = delta; } public void reset () { } public boolean shouldContinue (TRP trp) { /* if (oldMessages != null) retval = !checkForConvergence (trp); copyMessages(trp); return retval; */ boolean retval = !trp.hasConverged (delta); trp.copyOldMessages (); return retval; } public Object clone () throws CloneNotSupportedException { return super.clone (); } } // Runs until convergence, but doesn't stop until all edges have // been used at least once, and always stops after 1000 iterations. 
public static class DefaultConvergenceTerminator implements TerminationCondition { ConvergenceTerminator cterminator; IterationTerminator iterminator; String msg; public DefaultConvergenceTerminator () { this (0.001, 1000); } public DefaultConvergenceTerminator (double delta, int maxIter) { cterminator = new ConvergenceTerminator (delta); iterminator = new IterationTerminator (maxIter); msg = "***TRP quitting: over " + maxIter + " iterations"; } public void reset () { iterminator.reset (); cterminator.reset (); } // Terminate if converged or at insanely high # of iterations public boolean shouldContinue (TRP trp) { boolean notAllTouched = !trp.allEdgesTouched (); if (!iterminator.shouldContinue (trp)) { logger.warning (msg); if (notAllTouched) { logger.warning ("***TRP warning: Not all edges used!"); } return false; } if (notAllTouched) { return true; } else { return cterminator.shouldContinue (trp); } } public Object clone () throws CloneNotSupportedException { DefaultConvergenceTerminator dup = (DefaultConvergenceTerminator) super.clone (); dup.iterminator = (IterationTerminator) iterminator.clone (); dup.cterminator = (ConvergenceTerminator) cterminator.clone (); return dup; } } // And now, the heart of TRP: public void computeMarginals (FactorGraph m) { resetMessagesSentAtStart (); initForGraph (m); int iter = 0; while (terminator.shouldContinue (this)) { logger.finer ("TRP iteration " + (iter++)); cc.mallet.grmm.types.Tree tree = factory.nextTree (m); propagate (tree); dumpForIter (iter, tree); } iterUsed = iter; logger.info ("TRP used " + iter + " iterations."); doneWithGraph (m); } private void dumpForIter (int iter, Tree tree) { if (verboseOutputDirectory != null) { try { // output messages FileWriter writer = new FileWriter (new File (verboseOutputDirectory, "iter" + iter + ".txt")); dump (new PrintWriter (writer, true)); writer.close (); FileWriter bfWriter = new FileWriter (new File (verboseOutputDirectory, "beliefs" + iter + ".txt")); dumpBeliefs (new 
PrintWriter (bfWriter, true)); bfWriter.close (); // output spanning tree FileWriter treeWriter = new FileWriter (new File (verboseOutputDirectory, "tree" + iter + ".txt")); treeWriter.write (tree.toString ()); treeWriter.write ("\n"); treeWriter.close (); } catch (IOException e) { e.printStackTrace (); } } } private void dumpBeliefs (PrintWriter writer) { for (int vi = 0; vi < mdlCurrent.numVariables (); vi++) { Variable var = mdlCurrent.get (vi); Factor mrg = lookupMarginal (var); writer.println (mrg.dumpToString ()); writer.println (); } } private void propagate (cc.mallet.grmm.types.Tree tree) { Object root = tree.getRoot (); lambdaPropagation (tree, root); piPropagation (tree, root); } /** Sends BP messages starting from children to parents. This version uses constant stack space. */ private void lambdaPropagation (cc.mallet.grmm.types.Tree tree, Object root) { LinkedList openList = new LinkedList (); LinkedList closedList = new LinkedList (); openList.addAll (tree.getChildren (root)); while (!openList.isEmpty ()) { Object var = openList.removeFirst (); openList.addAll (tree.getChildren (var)); closedList.addFirst (var); } // Now open list contains all of the nodes (except the root) in reverse topological order. Send the messages. for (Iterator it = closedList.iterator (); it.hasNext ();) { Object child = it.next (); Object parent = tree.getParent (child); sendMessage (mdlCurrent, child, parent); } } /** Sends BP messages starting from parents to children. This version uses constant stack space. 
*/ private void piPropagation (cc.mallet.grmm.types.Tree tree, Object root) { LinkedList openList = new LinkedList (); openList.add (root); while (!openList.isEmpty ()) { Object current = openList.removeFirst (); List children = tree.getChildren (current); for (Iterator it = children.iterator (); it.hasNext ();) { Object child = it.next (); sendMessage (mdlCurrent, current, child); openList.add (child); } } } private void sendMessage (FactorGraph fg, Object parent, Object child) { if (logger.isLoggable (Level.FINER)) logger.finer ("Sending message: "+parent+" --> "+child); if (parent instanceof Factor) { sendMessage (fg, (Factor) parent, (Variable) child); } else if (parent instanceof Variable) { sendMessage (fg, (Variable) parent, (Factor) child); } } private boolean allEdgesTouched () { Iterator it = mdlCurrent.factorsIterator (); while (it.hasNext ()) { Factor factor = (Factor) it.next (); int idx = mdlCurrent.getIndex (factor); int numTouches = getNumTouches (idx); if (numTouches == 0) { logger.finest ("***TRP continuing: factor " + idx + " not touched."); return false; } } return true; } private void touchFactor (Factor factor) { int idx = mdlCurrent.getIndex (factor); incrementTouches (idx); } private boolean isFactorTouched (Factor factor) { int idx1 = mdlCurrent.getIndex (factor); return (getNumTouches (idx1) > 0); } private int getNumTouches (int idx1) { Integer integer = (Integer) factorTouched.get (idx1); return (integer == null) ? 0 : integer.intValue (); } private void incrementTouches (int idx1) { int nt = getNumTouches (idx1); factorTouched.put (idx1, new Integer (nt + 1)); } public Factor query (DirectedModel m, Variable var) { throw new UnsupportedOperationException ("GRMM doesn't yet do directed models."); } //xxx could get moved up to AbstractInferencer, if mdlCurrent did. 
public Assignment bestAssignment () { int[] outcomes = new int [mdlCurrent.numVariables ()]; for (int i = 0; i < outcomes.length; i++) { Variable var = mdlCurrent.get (i); TableFactor ptl = (TableFactor) lookupMarginal (var); outcomes[i] = ptl.argmax (); } return new Assignment (mdlCurrent, outcomes); } // Deep copy termination condition public Object clone () { try { TRP dup = (TRP) super.clone (); if (terminator != null) { dup.terminator = (TerminationCondition) terminator.clone (); } return dup; } catch (CloneNotSupportedException e) { // should never happen throw new RuntimeException (e); } } // Serialization private static final long serialVersionUID = 1; // If seralization-incompatible changes are made to these classes, // then smarts can be added to these methods for backward compatibility. private void writeObject (ObjectOutputStream out) throws IOException { out.defaultWriteObject (); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject (); } }
21,019
27.676671
114
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/ExactSampler.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference;

import java.util.List;
import java.util.ArrayList;
import java.util.Iterator;

import cc.mallet.grmm.types.*;
import cc.mallet.util.Randoms;

/**
 * Draws exact samples from the distribution defined by a factor graph.
 * A junction tree is built once per call to {@link #sample}, and each sample
 * is generated by forward sampling down the tree: sample the root clique,
 * then each child clique conditioned on the values already drawn.
 *
 * Created: Nov 9, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: ExactSampler.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
public class ExactSampler implements Sampler {

  Randoms r;

  public ExactSampler ()
  {
    this (new Randoms ());
  }

  public ExactSampler (Randoms r)
  {
    this.r = r;
  }

  /**
   * Returns an Assignment with N rows, each an independent exact sample
   * from the distribution of mdl.
   */
  public Assignment sample (FactorGraph mdl, int N)
  {
    JunctionTreeInferencer jti = new JunctionTreeInferencer ();
    jti.computeMarginals (mdl);
    JunctionTree jt = jti.lookupJunctionTree ();

    VarSet vs = mdl.varSet ();
    Assignment result = new Assignment ();
    int numLeft = N;
    while (numLeft > 0) {
      Assignment row = sampleOneAssn (jt);
      // Columns must match the factor graph's variable order, not the clique order.
      result.addRow (vs.toVariableArray (), reorderCols (row, vs));
      numLeft--;
    }
    return result;
  }

  /** Reorders the sampled values so that column i corresponds to vs.get(i). */
  private Object[] reorderCols (Assignment assn, VarSet vs)
  {
    int n = vs.size ();
    Object[] vals = new Object [n];
    for (int col = 0; col < n; col++) {
      vals[col] = assn.getObject (vs.get (col));
    }
    return vals;
  }

  /** Draws a single joint sample by forward sampling from the root clique. */
  private Assignment sampleOneAssn (JunctionTree jt)
  {
    VarSet root = (VarSet) jt.getRoot ();
    return sampleAssignmentRec (jt, new Assignment (), root);
  }

  /**
   * Samples the clique varSet conditioned on the values in assn,
   * then recurses into its children.  Returns the accumulated assignment.
   */
  private Assignment sampleAssignmentRec (JunctionTree jt, Assignment assn, VarSet varSet)
  {
    Factor marg = jt.getCPF (varSet);
    // Condition the clique potential on everything sampled so far, then sample the rest.
    Factor sliced = marg.slice (assn);
    assn = Assignment.union (assn, sliced.sample (r));

    Iterator childIt = jt.getChildren (varSet).iterator ();
    while (childIt.hasNext ()) {
      VarSet child = (VarSet) childIt.next ();
      assn = Assignment.union (assn, sampleAssignmentRec (jt, assn, child));
    }
    return assn;
  }

  public void setRandom (Randoms r)
  {
    this.r = r;
  }
}
2,494
26.722222
90
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/AbstractBeliefPropagation.java
package cc.mallet.grmm.inference;

import java.util.logging.Logger;
import java.util.logging.Level;
import java.util.List;
import java.util.Iterator;
import java.io.*;

import cc.mallet.grmm.types.*;
import cc.mallet.util.MalletLogger;

/**
 * Abstract base class for implementations of belief propagation for general factor graphs.
 * This class manages arrays of messages, computing beliefs from messages, and convergence
 * thresholds.
 * <p/>
 * How to send individual messages (e.g., sum-product, max-product, etc.) is managed
 * by instances of the interface {@link MessageStrategy}.  Concrete subclasses decide
 * which order to send messages in.
 *
 * @author Charles Sutton
 * @version $Id: AbstractBeliefPropagation.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
public abstract class AbstractBeliefPropagation extends AbstractInferencer {

  protected static Logger logger = MalletLogger.getLogger (AbstractBeliefPropagation.class.getName ());

  // When true, prints a diagnostic line for the message that blocks (or achieves) convergence.
  private static final boolean diagnoseConvergence = false;

  protected boolean normalizeBeliefs = true;

  // Global count across all BP instances in this JVM; myMessagesSent is per-instance.
  static private int totalMessagesSent = 0;
  transient private int myMessagesSent = 0;
  transient private int messagesSentAtStart = 0;

  // Maximum absolute per-entry message change below which BP is considered converged.
  private double threshold = 0.00001;

  protected boolean useCaching = false;

  private MessageStrategy messager;

  // Number of iterations used by the last call to computeMarginals (set by subclasses).
  protected transient int iterUsed;

  protected AbstractBeliefPropagation ()
  {
    this (new SumProductMessageStrategy ());
  }

  protected AbstractBeliefPropagation (MessageStrategy messager)
  {
    this.messager = messager;
  }

  public MessageStrategy getMessager ()
  {
    return messager;
  }

  public AbstractBeliefPropagation setMessager (MessageStrategy messager)
  {
    this.messager = messager;
    return this;
  }

  /**
   * Returns the total number of messages all BP inferencers have sent in the current Java image.
   */
  public static int getTotalMessagesSent ()
  {
    return totalMessagesSent;
  }

  /**
   * Returns the total number of messages this inferencer has sent since its creation.
   */
  public int getMessagesSent ()
  {
    return myMessagesSent;
  }

  /**
   * Returns the number of messages sent during the last call to computeMarginals.
   */
  public int getMessagesUsedLastTime ()
  {
    return myMessagesSent - messagesSentAtStart;
  }

  protected void resetMessagesSentAtStart ()
  {
    messagesSentAtStart = myMessagesSent;
  }

  /**
   * Array that maps (to, from) to the lambda message sent from node
   * from to node to.
   */
  transient private MessageArray messages;
  transient private MessageArray oldMessages; // messages from variable --> factor

  // Cached single-variable beliefs, indexed by variable index; filled lazily by lookupMarginal.
  transient private Factor[] bel;

  protected transient FactorGraph mdlCurrent;

  private void retrieveCachedMessages (FactorGraph m)
  {
    messages = (MessageArray) m.getInferenceCache (getClass ());
  }

  private void cacheMessages (FactorGraph m)
  {
    m.setInferenceCache (getClass (), messages);
  }

  private void clearOldMessages ()
  {
    oldMessages = null;
  }

  final protected void copyOldMessages ()
  {
    clearOldMessages ();
    oldMessages = messages.duplicate ();
  }

  final protected boolean hasConverged ()
  {
    return hasConverged (this.threshold);
  }

  /**
   * Returns true if no message entry changed by more than threshold
   * since the last call to copyOldMessages.
   */
  final protected boolean hasConverged (double threshold)
  {
    double maxDiff = Double.NEGATIVE_INFINITY;
    Factor bestOldMsg = null, bestNewMsg = null;
    for (MessageArray.Iterator msgIt = oldMessages.iterator (); msgIt.hasNext ();) {
      Factor oldMsg = (Factor) msgIt.next ();
      Object from = msgIt.from ();
      Object to = msgIt.to ();
      Factor newMsg = messages.get (from, to);
      if (oldMsg != null) {
        assert newMsg != null : "Message went from nonnull to null " + from + " --> " + to;
        for (java.util.Iterator it = oldMsg.assignmentIterator (); it.hasNext ();) {
          Assignment assn = (Assignment) it.next ();
          double val1 = oldMsg.value (assn);
          double val2 = newMsg.value (assn);
          double diff = Math.abs (val1 - val2);
          if (diff > threshold) {
            if (diagnoseConvergence) {
              System.err.println ("*** Not converged: Difference of : " + diff + " from " + oldMsg + " --> " + newMsg);
            }
            return false;
          } else if (diff > maxDiff) {
            maxDiff = diff;
            bestOldMsg = oldMsg;
            bestNewMsg = newMsg;
          }
        }
      }
    }
    if (diagnoseConvergence) {
      System.err.println (
              "*** CONVERGED: Max absolute difference : " + maxDiff + " from " + bestOldMsg + " --> " + bestNewMsg);
    }
    return true;
  }

  private void initOldMessages (FactorGraph fg)
  {
    oldMessages = new MessageArray (fg);
    if (useCaching && fg.getInferenceCache (getClass ()) != null) {
      logger.info ("AsyncLoopyBP: Reusing previous marginals");
      retrieveCachedMessages (fg);
      copyOldMessages ();
    } else {
      // Initialize every message in both directions to a fresh single-variable table.
      for (java.util.Iterator it = fg.factorsIterator (); it.hasNext ();) {
        Factor factor = (Factor) it.next ();
        VarSet varset = factor.varSet ();
        for (java.util.Iterator vit = varset.iterator (); vit.hasNext ();) {
          Variable var = (Variable) vit.next ();
          oldMessages.put (var, factor, new TableFactor (var));
          oldMessages.put (factor, var, new TableFactor (var));
        }
      }
    }
  }

  transient protected int assignedVertexPtls[];

  protected void initForGraph (FactorGraph mdl)
  {
    mdlCurrent = mdl;
    int numV = mdl.numVariables ();
    bel = new Factor [numV];
    Object cache = mdl.getInferenceCache (getClass ());
    if (useCaching && (cache != null)) {
      messages = (MessageArray) cache;
    } else {
      messages = new MessageArray (mdl);
    }
    initOldMessages (mdl);
    messager.setMessageArray (messages, oldMessages);
  }

  protected void sendMessage (FactorGraph mdl, Variable from, Factor to)
  {
    totalMessagesSent++;
    myMessagesSent++;
    messager.sendMessage (mdl, from, to);
  }

  protected void sendMessage (FactorGraph mdl, Factor from, Variable to)
  {
    totalMessagesSent++;
    myMessagesSent++;
    messager.sendMessage (mdl, from, to);
  }

  protected void doneWithGraph (FactorGraph mdl)
  {
    clearOldMessages (); // free up memory
    if (useCaching) cacheMessages (mdl);
  }

  public int iterationsUsed ()
  {
    return iterUsed;
  }

  public interface MessageStrategy {
    void setMessageArray (MessageArray msgs, MessageArray oldMsgs);

    void sendMessage (FactorGraph mdl, Factor from, Variable to);

    void sendMessage (FactorGraph mdl, Variable from, Factor to);

    Factor msgProduct (Factor product, int idx, int excludeMsgFrom);
  }

  public abstract static class AbstractMessageStrategy implements MessageStrategy {

    protected MessageArray messages;
    protected MessageArray oldMessages;

    public void setMessageArray (MessageArray msgs, MessageArray oldMsgs)
    {
      messages = msgs;
      oldMessages = oldMsgs;
    }

    /**
     * Multiplies into product all messages arriving at node idx,
     * except the one arriving from excludeMsgFrom.  If product is null,
     * a fresh single-variable table (log-space if appropriate) is created.
     */
    public Factor msgProduct (Factor product, int idx, int excludeMsgFrom)
    {
      if (product == null) {
        product = createEmptyFactorForVar (idx);
      }
      for (MessageArray.ToMsgsIterator it = messages.toMessagesIterator (idx); it.hasNext ();) {
        it.next ();
        int j = it.currentFromIdx ();
        Factor msg = it.currentMessage ();
        if (j != excludeMsgFrom) {
          product.multiplyBy (msg);
        }
      }
      return product;
    }

    private Factor createEmptyFactorForVar (int idx)
    {
      Factor product;
      if (messages.isInLogSpace ()) {
        product = new LogTableFactor ((Variable) messages.idx2obj (idx));
      } else {
        product = new TableFactor ((Variable) messages.idx2obj (idx));
      }
      return product;
    }
  }

  public static class SumProductMessageStrategy extends AbstractMessageStrategy implements Serializable {

    // Fraction of the new message used in each update; 1.0 means no damping.
    private double damping = 1.0;

    public SumProductMessageStrategy ()
    {
    }

    public SumProductMessageStrategy (double damping)
    {
      this.damping = damping;
    }

    public void sendMessage (FactorGraph mdl, Factor from, Variable to)
    {
      int fromIdx = messages.getIndex (from);
      int toIdx = messages.getIndex (to);
      Factor product = from.duplicate ();
      msgProduct (product, fromIdx, toIdx);
      Factor msg = product.marginalize (to);
      msg.normalize ();

      if (logger.isLoggable (Level.FINEST)) {
        logger.info ("MSG "+from+" --> "+to);
        logger.info ("FACTOR: "+from.dumpToString());
        logger.info ("MSG: "+msg.dumpToString ());
        logger.info ("END MSG "+from+" --> "+to);
      }

      assert msg.varSet ().size () == 1;
      assert msg.varSet ().contains (to);

      makeDampedUpdate (fromIdx, toIdx, msg);
    }

    public void sendMessage (FactorGraph mdl, Variable from, Factor to)
    {
      int fromIdx = messages.getIndex (from);
      int toIdx = messages.getIndex (to);
      Factor msg = msgProduct (null, fromIdx, toIdx);
      msg.normalize ();
      assert msg.varSet ().size () == 1;
      assert msg.varSet ().contains (from);
      messages.put (fromIdx, toIdx, msg);
    }

    /** Stores damping*msg + (1-damping)*oldMsg as the new message (no-op blend if damping == 1). */
    private void makeDampedUpdate (int fromIdx, int toIdx, Factor msg)
    {
      if (damping < 1.0) { // there's damping
        Factor oldMsg = oldMessages.get (fromIdx, toIdx);
        if (oldMsg != null) {
          AbstractTableFactor oldTbl = (AbstractTableFactor) oldMsg.duplicate ();
          oldTbl.normalize ();
          oldTbl.timesEquals (1 - damping);
          AbstractTableFactor tbl = (AbstractTableFactor) msg;
          tbl.timesEquals (damping);
          tbl.plusEquals (oldTbl);
          msg = tbl;
        }
      }
      messages.put (fromIdx, toIdx, msg);
    }

    // Serialization
    private static final long serialVersionUID = 1;
    private static final int CUURENT_SERIAL_VERSION = 2;

    private void writeObject (ObjectOutputStream out) throws IOException
    {
      out.defaultWriteObject ();
      out.writeInt (CUURENT_SERIAL_VERSION);
      out.writeDouble (damping);
    }

    private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
    {
      in.defaultReadObject ();
      int version = in.readInt (); // version
      if (2 <= version) {
        damping = in.readDouble ();
      }
    }
  }

  public static class MaxProductMessageStrategy extends AbstractMessageStrategy implements Serializable {

    public void sendMessage (FactorGraph mdl, Factor from, Variable to)
    {
      int fromIdx = messages.getIndex (from);
      int toIdx = messages.getIndex (to);
      Factor product = from.duplicate ();
      msgProduct (product, fromIdx, toIdx);
      // extractMax (rather than marginalize) makes this the max-product recursion.
      Factor msg = product.extractMax (to);
      msg.normalize ();
      assert msg.varSet ().size () == 1;
      assert msg.varSet ().contains (to);
      messages.put (fromIdx, toIdx, msg);
    }

    public void sendMessage (FactorGraph mdl, Variable from, Factor to)
    {
      int fromIdx = messages.getIndex (from);
      int toIdx = messages.getIndex (to);
      Factor msg = msgProduct (null, fromIdx, toIdx);
      msg.normalize ();
      assert msg.varSet ().size () == 1;
      assert msg.varSet ().contains (from);
      messages.put (fromIdx, toIdx, msg);
    }

    // Serialization
    private static final long serialVersionUID = 1;
    private static final int CUURENT_SERIAL_VERSION = 1;

    private void writeObject (ObjectOutputStream out) throws IOException
    {
      out.defaultWriteObject ();
      out.writeInt (CUURENT_SERIAL_VERSION);
    }

    private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
    {
      in.defaultReadObject ();
      in.readInt (); // version
    }
  }

  public Factor lookupMarginal (Variable var)
  {
    int idx = mdlCurrent.getIndex (var);
    // FIX: was (idx > bel.length), an off-by-one that let idx == bel.length through
    //  and caused an ArrayIndexOutOfBoundsException below instead of the intended
    //  IllegalArgumentException.
    if ((idx < 0) || (idx >= bel.length)) {
      throw new IllegalArgumentException ("Cannot find variable "+var+" in factor graph "+mdlCurrent);
    }
    if (bel[idx] == null) {
      // Belief = product of all incoming messages (no exclusion), normalized on request.
      Factor marg = messager.msgProduct (null, idx, Integer.MIN_VALUE);
      if (normalizeBeliefs) {
        marg.normalize ();
      }
      assert marg.varSet ().size () == 1 :"Invalid marginal for var " + var + ": " + marg;
      assert marg.varSet ().contains (var) :"Invalid marginal for var " + var + ": " + marg;
      bel[idx] = marg;
    }
    return bel[idx];
  }

  public void dump ()
  {
    messages.dump ();
  }

  public void reportTime ()
  {
    System.err.println ("AbstractBeliefPropagation: Total messages sent = "+totalMessagesSent);
  }

  public void dump (PrintWriter writer)
  {
    messages.dump (writer);
  }

  // Serialization
  private static final long serialVersionUID = 1;

  // If seralization-incompatible changes are made to these classes,
  // then smarts can be added to these methods for backward compatibility.
  private void writeObject (ObjectOutputStream out) throws IOException
  {
    out.defaultWriteObject ();
  }

  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
  {
    in.defaultReadObject ();
  }

  public Factor lookupMarginal (VarSet c)
  {
    if (c.size () == 1) {
      return lookupMarginal (c.get (0));
    } else {
      List factors = mdlCurrent.allFactorsOf (c);
      if (factors.isEmpty ()) {
        throw new UnsupportedOperationException ("Cannot compute marginal of " + c +
                ": Must be either a single variable or a factor in the graph.");
      }
      return lookupMarginal (c, factors);
    }
  }

  /** Clique belief: product of the factors over vs and all messages into them from vs. */
  private Factor lookupMarginal (VarSet vs, List factors)
  {
    Factor marginal = Factors.multiplyAll (factors);
    for (Iterator fit = factors.iterator (); fit.hasNext ();) {
      Factor factor = (Factor) fit.next ();
      for (java.util.Iterator it = vs.iterator (); it.hasNext ();) {
        Variable var = (Variable) it.next ();
        Factor msg = messages.get (var, factor);
        if (msg != null) { // if the inferencer was stopped early, there may be no message
          marginal.multiplyBy (msg);
        }
      }
    }
    marginal.normalize ();
    return marginal;
  }

  public double lookupLogJoint (Assignment assn)
  {
    double accum = 0.0;

    // Compute using BP-factorization
    //  prod_s (p(x_s))^-(deg(s)-1) * ...
    for (java.util.Iterator it = mdlCurrent.variablesIterator (); it.hasNext ();) {
      Variable var = (Variable) it.next ();
      Factor ptl = lookupMarginal (var);
      int deg = mdlCurrent.getDegree (var);
      if (deg != 1) // Note that below works correctly for degree 0!
      {
        accum -= (deg - 1) * ptl.logValue (assn);
      }
    }

    //  ... * prod_{c} p(x_C)
    for (java.util.Iterator it = mdlCurrent.varSetIterator (); it.hasNext ();) {
      VarSet varSet = (VarSet) it.next ();
      Factor p12 = lookupMarginal (varSet);
      double logphi = p12.logValue (assn);
      accum += logphi;
    }

    return accum;
  }
}
16,006
27.13181
119
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/JunctionTree.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference;

import java.util.Set;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Collection;
import java.util.List;
import java.util.Arrays;

import cc.mallet.grmm.types.*;
import gnu.trove.TIntObjectHashMap;
import gnu.trove.THashSet;
import gnu.trove.TIntObjectIterator;

/**
 * Datastructure for a junction tree.  Each vertex is a clique (a VarSet);
 * each edge carries a separator set (the intersection of the two adjacent
 * cliques) along with the separator's potential.
 *
 * Created: Tue Sep 30 10:30:25 2003
 *
 * @author <a href="mailto:[email protected]">Charles Sutton</a>
 * @version $Id: JunctionTree.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
public class JunctionTree extends Tree {

  private int numNodes;

  /** An edge's separator: the variable intersection of two adjacent cliques plus its potential. */
  private static class Sepset {
    Sepset(Set s, Factor p)
    {
      set = s;
      ptl = p;
    }

    Set set;
    Factor ptl;
  }

  // Maps the packed pair of clique indices (see hashIdxIdx) to that edge's Sepset.
  private TIntObjectHashMap sepsets;
  // Clique potentials indexed by clique index; entries may be null (and the whole
  //  array is null after compact()).
  private Factor[] cpfs;

  public JunctionTree(int size)
  {
    super();
    numNodes = size;
    sepsets = new TIntObjectHashMap();
    cpfs = new Factor[size];
  } // JunctionTree constructor

  /** Adds an edge, computing and storing the separator between the two cliques. */
  public void addNode (Object parent1, Object child1)
  {
    super.addNode(parent1, child1);
    VarSet parent = (VarSet) parent1;
    VarSet child = (VarSet) child1;
    Set sepset = parent.intersection(child);
    int id1 = lookupIndex(parent);
    int id2 = lookupIndex(child);
    putSepset(id1, id2, new Sepset (sepset, newSepsetPtl (sepset)));
  }

  private Factor newSepsetPtl (Set sepset)
  {
    if (sepset.isEmpty ()) {
      // use identity factor
      return ConstantFactor.makeIdentityFactor ();
    } else {
      return new TableFactor (sepset);
    }
  }

  /** Packs an unordered pair of clique ids into one int key; hence ids must be < 65536. */
  private int hashIdxIdx(int id1, int id2)
  {
    assert (id1 < 65536) && (id2 < 65536);
    int id;
    if (id1 < id2) {
      id = (id1 << 16) | id2;
    } else {
      id = (id2 << 16) | id1;
    }
    return id;
  }

  private void putSepset(int id1, int id2, Sepset sepset)
  {
    int id = hashIdxIdx(id1, id2);
    sepsets.put(id, sepset);
  }

  private Sepset getSepset(int id1, int id2)
  {
    int id = hashIdxIdx(id1, id2);
    return (Sepset) sepsets.get(id);
  }

  // CPF accessors

  public Factor getCPF(VarSet c)
  {
    return cpfs[lookupIndex(c)];
  }

  public void setCPF(VarSet c, Factor pot)
  {
    cpfs[lookupIndex(c)] = pot;
  }

  /** Resets every clique and separator potential to a fresh table. */
  void clearCPFs()
  {
    for (int i = 0; i < cpfs.length; i++) {
      cpfs[i] = new TableFactor ((VarSet) lookupVertex (i));
    }
    TIntObjectIterator it = sepsets.iterator();
    while (it.hasNext()) {
      it.advance();
      Sepset sepset = (Sepset) it.value();
      sepset.ptl = newSepsetPtl (sepset.set);
    }
  }

  public Set sepsetPotentials()
  {
    THashSet set = new THashSet();
    TIntObjectIterator it = sepsets.iterator();
    while (it.hasNext()) {
      it.advance();
      Factor ptl = ((Sepset) it.value()).ptl;
      set.add(ptl);
    }
    return set;
  }

  void setSepsetPot(Factor pot, VarSet v1, VarSet v2)
  {
    int id1 = lookupIndex(v1);
    int id2 = lookupIndex(v2);
    getSepset(id1, id2).ptl = pot;
  }

  public Factor getSepsetPot(VarSet v1, VarSet v2)
  {
    int id1 = lookupIndex(v1);
    int id2 = lookupIndex(v2);
    return getSepset(id1, id2).ptl;
  }

  /**
   * Returns a collection of all the potentials of cliques in the junction tree.
   * (i.e., these are the terms in the numerator of the junction tree theorem).
   * @see #sepsetPotentials()
   */
  public Collection clusterPotentials ()
  {
    HashSet h = new HashSet();
    for (int i = 0; i < cpfs.length; i++) {
      if (cpfs[i] != null) {
        h.add(cpfs[i]);
      }
    }
    return h;
  }

  public Set getSepset(VarSet v1, VarSet v2)
  {
    int id1 = lookupIndex(v1);
    int id2 = lookupIndex(v2);
    return getSepset(id1, id2).set;
  }

  public Factor lookupMarginal(Variable var)
  {
    VarSet c = findParentCluster(var);
    Factor pot = getCPF(c);
    return pot.marginalize(var);
  }

  /** Log joint = sum of clique log-potentials minus sum of separator log-potentials. */
  public double lookupLogJoint(Assignment assn)
  {
    double accum = 0;
    for (int i = 0; i < cpfs.length; i++) {
      if (cpfs[i] != null) {
        double phi = cpfs[i].logValue (assn);
        accum += phi;
      }
    }
    TIntObjectIterator it = sepsets.iterator();
    while (it.hasNext()) {
      it.advance();
      Factor ptl = ((Sepset) it.value()).ptl;
      double phi = ptl.logValue (assn);
      accum -= phi;
    }
    return accum;
  }

  /** Returns the smallest-weight cluster in the tree that contains var. */
  public VarSet findParentCluster(Variable var)
  {
    int best = Integer.MAX_VALUE;
    VarSet retval = null;
    // xxx Inefficient -- linear scan over all cliques
    for (Iterator it = getVerticesIterator(); it.hasNext();) {
      VarSet c = (VarSet) it.next();
      if (c.contains(var) && c.weight() < best) {
        retval = c;
        best = c.weight();
      }
    }
    return retval;
  }

  /**
   * Returns the smallest-weight cluster in the tree that contains all the vars
   * in a collection.
   */
  public VarSet findParentCluster(Collection vars)
  {
    int best = Integer.MAX_VALUE;
    VarSet retval = null;
    // xxx Inefficient -- linear scan over all cliques
    for (Iterator it = getVerticesIterator(); it.hasNext();) {
      VarSet c = (VarSet) it.next();
      if (c.containsAll(vars) && c.weight() < best) {
        retval = c;
        best = c.weight();
      }
    }
    return retval;
  }

  /**
   * Returns a cluster in the tree that contains exactly the given
   * variables, or null if no such cluster exists.
   */
  public VarSet findCluster(Variable[] vars)
  {
    List l = Arrays.asList(vars);
    for (Iterator it = getVerticesIterator(); it.hasNext();) {
      VarSet c2 = (VarSet) it.next();
      if (c2.containsAll(l) && l.containsAll(c2))
        return c2;
    }
    return null;
  }

  /** Normalizes all potentials in the tree, both node and sepset. */
  public void normalizeAll()
  {
    int n = cpfs.length;
    for (int i = 0; i < n; i++) {
      if (cpfs[i] != null) {
        cpfs[i].normalize();
      }
    }
    TIntObjectIterator it = sepsets.iterator();
    while (it.hasNext()) {
      it.advance();
      Factor ptl = ((Sepset) it.value()).ptl;
      ptl.normalize();
    }
  }

  int getId(VarSet c)
  {
    return lookupIndex(c);
  }

  // Debugging functions

  public void dump ()
  {
    int n = cpfs.length;
    // This will cause OpenJGraph to print all our nodes and edges
    System.out.println(this);
    // Now lets print all the cpfs
    System.out.println("Vertex CPFs");
    for (int i = 0; i < n; i++) {
      if (cpfs[i] != null) {
        System.out.println("CPF "+i+" "+cpfs[i].dumpToString ());
      }
    }
    // And the sepset potentials
    System.out.println("sepset CPFs");
    TIntObjectIterator it = sepsets.iterator();
    while (it.hasNext()) {
      it.advance();
      Factor ptl = ((Sepset) it.value()).ptl;
      System.out.println(ptl.dumpToString ());
    }
    System.out.println ("/End JT");
  }

  /**
   * Debugging version of {@link #lookupLogJoint} that prints the running total
   * after each clique and separator term.
   * FIX: previously each term's log value (phi) was computed but never
   * folded into accum, so every line printed the constant 0.0 and the method
   * always returned 0.  Now mirrors lookupLogJoint: add clique terms, subtract
   * separator terms.
   */
  public double dumpLogJoint (Assignment assn)
  {
    double accum = 0;
    for (int i = 0; i < cpfs.length; i++) {
      if (cpfs[i] != null) {
        double phi = cpfs[i].logValue (assn);
        accum += phi;
        System.out.println ("CPF "+i+" accum = "+accum);
      }
    }
    TIntObjectIterator it = sepsets.iterator();
    while (it.hasNext()) {
      it.advance();
      Factor ptl = ((Sepset) it.value()).ptl;
      double phi = ptl.logValue (assn);
      accum -= phi;
      System.out.println("Sepset "+ptl.varSet()+" accum "+accum);
    }
    return accum;
  }

  /** Returns true if any clique or separator potential contains a NaN. */
  public boolean isNaN()
  {
    int n = cpfs.length;
    for (int i = 0; i < n; i++)
      // FIX: guard against null entries; every other loop over cpfs allows them.
      if (cpfs[i] != null && cpfs[i].isNaN()) return true;
    // And the sepset potentials
    TIntObjectIterator it = sepsets.iterator();
    while (it.hasNext()) {
      it.advance();
      Factor ptl = ((Sepset) it.value()).ptl;
      if (ptl.isNaN()) return true;
    }
    return false;
  }

  /** Tree entropy: sum of clique entropies minus sum of separator entropies. */
  public double entropy ()
  {
    double entropy = 0;
    for (Iterator it = clusterPotentials ().iterator (); it.hasNext ();) {
      Factor ptl = (Factor) it.next ();
      entropy += ptl.entropy ();
    }
    for (Iterator it = sepsetPotentials ().iterator (); it.hasNext ();) {
      Factor ptl = (Factor) it.next ();
      entropy -= ptl.entropy ();
    }
    return entropy;
  }

  // Implementation of edu.umass.cs.mallet.users.casutton.graphical.Compactible

  public void decompact()
  {
    cpfs = new Factor[numNodes];
    clearCPFs();
  }

  public void compact()
  {
    cpfs = null;
  }
} // JunctionTree
8,756
21.113636
80
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/VariableElimination.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference;

import java.util.HashSet;
import java.util.Set;
import java.util.Iterator;
import java.util.Collection;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;

import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.FactorGraph;
import cc.mallet.grmm.types.TableFactor;
import cc.mallet.grmm.types.Variable;

/**
 * The variable elimination algorithm for inference in graphical models.
 *
 * Created: Mon Sep 22 17:34:00 2003
 *
 * @author <a href="mailto:[email protected]">Charles Sutton</a>
 * @version $Id: VariableElimination.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
public class VariableElimination extends AbstractInferencer {

  /**
   * Removes from allPhi every potential that mentions node (or has an
   * empty domain) and returns the product of the removed potentials.
   */
  private Factor eliminate (Collection allPhi, Variable node)
  {
    HashSet relevant = new HashSet ();
    Iterator it = allPhi.iterator ();
    while (it.hasNext ()) {
      Factor cpf = (Factor) it.next ();
      boolean involvesNode = cpf.varSet ().isEmpty () || cpf.containsVar (node);
      if (involvesNode) {
        relevant.add (cpf);
        it.remove ();
      }
    }
    return TableFactor.multiplyAll (relevant);
  }

  /**
   * The bulk of the variable-elimination algorithm.  Returns the marginal
   * density of the variable query in model, un-normalized.
   * Normalization is done separately to keep computeNormalizationFactor simple.
   */
  public Factor unnormalizedMarginal (FactorGraph model, Variable query)
  {
    // The elimination order here is arbitrary (iteration order of the variable
    //  set); using buckets / a smarter ordering would be more efficient.

    // Work on duplicates so the model's own potentials are untouched.
    HashSet workingSet = new HashSet ();
    Iterator factorIt = model.factorsIterator ();
    while (factorIt.hasNext ()) {
      Factor factor = (Factor) factorIt.next ();
      workingSet.add (factor.duplicate ());
    }

    // Eliminate each non-query variable in turn.
    Set nodes = model.variablesSet ();
    for (Iterator it = nodes.iterator (); it.hasNext ();) {
      Variable node = (Variable) it.next ();
      if (node == query) { continue; } // the query variable is eliminated last
      Factor combined = eliminate (workingSet, node);
      // Sum the eliminated variable out of the combined potential (unless it is
      //  already a single-variable potential), then return the result to the pool.
      Factor summedOut = (combined.varSet ().size () == 1)
              ? combined
              : combined.marginalizeOut (node);
      workingSet.add (summedOut);
    }

    // Every remaining potential should involve only the query variable --
    //  UNLESS the graph is disconnected -- so just eliminate the query var.
    Factor marginal = eliminate (workingSet, query);
    assert marginal.containsVar (query);
    assert marginal.varSet ().size () == 1;
    return marginal;
  }

  /**
   * Computes the normalization constant for a model.
   */
  public double computeNormalizationFactor (FactorGraph m)
  {
    // The sum of any variable's unnormalized marginal is the partition function.
    Variable anyVar = (Variable) m.variablesSet ().iterator ().next ();
    Factor marginal = unnormalizedMarginal (m, anyVar);
    return marginal.sum ();
  }

  transient FactorGraph mdlCurrent;

  // Inert.  All work done in lookupMarginal().
  public void computeMarginals (FactorGraph m)
  {
    mdlCurrent = m;
  }

  public Factor lookupMarginal (Variable var)
  {
    Factor marginal = unnormalizedMarginal (mdlCurrent, var);
    marginal.normalize ();
    return marginal;
  }

  // Serialization
  private static final long serialVersionUID = 1;

  // If seralization-incompatible changes are made to these classes,
  // then smarts can be added to these methods for backward compatibility.
  private void writeObject (ObjectOutputStream out) throws IOException
  {
    out.defaultWriteObject ();
  }

  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
  {
    in.defaultReadObject ();
  }
}
4,386
28.05298
93
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/LoopyBP.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference;

import java.util.Iterator;
import java.util.Collections;
import java.util.ArrayList;
import java.util.Random;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;

import cc.mallet.grmm.types.*;

/**
 * The loopy belief propagation algorithm for approximate inference in
 * general graphical models.  Messages are sent over all edges in a random
 * order each sweep, until convergence or a maximum iteration count.
 *
 * Created: Wed Nov  5 19:30:15 2003
 *
 * @author <a href="mailto:[email protected]">Charles Sutton</a>
 * @version $Id: LoopyBP.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
public class LoopyBP extends AbstractBeliefPropagation {

  public static final int DEFAULT_MAX_ITER = 1000;

  private int maxIter;
  private Random rand = new Random ();

  public void setUseCaching (boolean useCaching)
  {
    this.useCaching = useCaching;
  }

  // Note this does not have the sophisticated Terminator interface
  //  that we've got in TRP.
  public LoopyBP ()
  {
    this (new AbstractBeliefPropagation.SumProductMessageStrategy (), DEFAULT_MAX_ITER);
  }

  public LoopyBP (int maxIter)
  {
    this (new AbstractBeliefPropagation.SumProductMessageStrategy (), maxIter);
  }

  public LoopyBP (AbstractBeliefPropagation.MessageStrategy messager, int maxIter)
  {
    super (messager);
    this.maxIter = maxIter;
  }

  /** Returns a loopy BP inferencer that runs max-product (for MAP-style inference). */
  public static Inferencer createForMaxProduct ()
  {
    return new LoopyBP (new MaxProductMessageStrategy (), DEFAULT_MAX_ITER);
  }

  public LoopyBP setRand (Random rand)
  {
    this.rand = rand;
    return this;
  }

  public void computeMarginals (FactorGraph mdl)
  {
    super.initForGraph (mdl);

    int iter = 0;
    while (iter < maxIter) {
      logger.finer ("***AsyncLoopyBP iteration "+iter);
      propagate (mdl);
      if (hasConverged ()) break;
      copyOldMessages ();
      iter++;
    }

    iterUsed = iter;
    if (iter >= maxIter) {
      logger.info ("***Loopy BP quitting: not converged after "+maxIter+" iterations.");
    } else {
      iterUsed++; // off by one: the break above happens before the final increment
      logger.info ("***AsyncLoopyBP converged: "+iterUsed+" iterations");
    }

    doneWithGraph (mdl);
  }

  // One sweep: all variable-->factor messages, then all factor-->variable
  //  messages, over the factors in a freshly shuffled order.
  private void propagate (FactorGraph mdl)
  {
    ArrayList factors = new ArrayList (mdl.factors ());
    Collections.shuffle (factors, rand);

    Iterator fit = factors.iterator ();
    while (fit.hasNext ()) {
      Factor factor = (Factor) fit.next ();
      Iterator vit = factor.varSet ().iterator ();
      while (vit.hasNext ()) {
        sendMessage (mdl, (Variable) vit.next (), factor);
      }
    }

    fit = factors.iterator ();
    while (fit.hasNext ()) {
      Factor factor = (Factor) fit.next ();
      Iterator vit = factor.varSet ().iterator ();
      while (vit.hasNext ()) {
        sendMessage (mdl, factor, (Variable) vit.next ());
      }
    }
  }

  // Serialization
  private static final long serialVersionUID = 1;

  // If seralization-incompatible changes are made to these classes,
  // then smarts can be added to these methods for backward compatibility.
  private void writeObject (ObjectOutputStream out) throws IOException
  {
    out.defaultWriteObject ();
  }

  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
  {
    in.defaultReadObject ();
  }
}
3,753
29.520325
109
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/MessageArray.java
/* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://mallet.cs.umass.edu/
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference;

import gnu.trove.TIntObjectIterator;

import java.io.PrintWriter;
import java.io.OutputStreamWriter;

import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.FactorGraph;
import cc.mallet.grmm.types.LogTableFactor;
import cc.mallet.grmm.types.Variable;
import cc.mallet.grmm.util.MIntInt2ObjectMap;

/**
 * Efficiently manages a array of messages in a factor graph from
 * variables to factors and vice versa.
 * <p>
 * Endpoints are encoded as ints: variables map to their nonnegative
 * factor-graph index, and factors map to {@code -(index + 1)} (always
 * negative), so one int space covers both kinds of endpoint.  Messages
 * are stored keyed as (toIdx, fromIdx).
 *
 * Created: Feb 1, 2006
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: MessageArray.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
public class MessageArray {

  private FactorGraph fg;
  private MIntInt2ObjectMap messages; // (toIdx, fromIdx) --> message Factor
  private int numV;  // number of variables in fg
  private int numF;  // number of factors in fg

  // True if messages are represented in log space (detected from factor 0).
  private boolean inLogSpace;

  public MessageArray (FactorGraph fg)
  {
    this.fg = fg;
    numV = fg.numVariables ();
    numF = fg.factors ().size();
    // Initial capacity covers one slot per variable plus one per factor.
    // (Fixed from "numV + numV", an apparent typo; deepCopy() below already
    // sized its copy with numV + numF.  This is only a capacity hint, so the
    // change is behavior-neutral.)
    messages = new MIntInt2ObjectMap (numV + numF);
    inLogSpace = (fg.getFactor (0) instanceof LogTableFactor);
  }

  public boolean isInLogSpace () { return inLogSpace; }

  /**
   * Returns the message between two endpoints, dispatching on whether the
   * sender is a variable or a factor.
   * @throws IllegalArgumentException if the pair is not (Factor, Variable)
   *         or (Variable, Factor)
   */
  public Factor get (Object from, Object to)
  {
    if (from instanceof Factor && to instanceof Variable) {
      return get ((Factor) from, (Variable) to);
    } else if (from instanceof Variable && to instanceof Factor) {
      return get ((Variable) from, (Factor) to);
    } else {
      throw new IllegalArgumentException ();
    }
  }

  /** Returns the variable-to-factor message, or null if none stored. */
  public Factor get (Variable from, Factor to)
  {
    int fromIdx = getIndex (from);
    int toIdx = getIndex (to);
    return get (toIdx, fromIdx);
  }

  /** Returns the factor-to-variable message, or null if none stored. */
  public Factor get (Factor from, Variable to)
  {
    int fromIdx = getIndex (from);
    int toIdx = getIndex (to);
    return get (toIdx, fromIdx);
  }

  Factor get (int toIdx, int fromIdx)
  {
    return (Factor) messages.get (toIdx, fromIdx);
  }

  /** Stores (replacing any previous) the factor-to-variable message. */
  public void put (Factor from, Variable to, Factor msg)
  {
    int fromIdx = getIndex (from);
    int toIdx = getIndex (to);
    messages.put (toIdx, fromIdx, msg);
  }

  /** Stores (replacing any previous) the variable-to-factor message. */
  public void put (Variable from, Factor to, Factor msg)
  {
    int fromIdx = getIndex (from);
    int toIdx = getIndex (to);
    messages.put (toIdx, fromIdx, msg);
  }

  // more dangerous, but for efficiency: caller supplies pre-encoded indices
  public void put (int fromIdx, int toIdx, Factor msg)
  {
    messages.put (toIdx, fromIdx, msg);
  }

  /** Iterates over all stored messages (see inner Iterator class). */
  public Iterator iterator ()
  {
    return new Iterator ();
  }

  /** Iterates over all messages directed at the endpoint with index toIdx. */
  public ToMsgsIterator toMessagesIterator (int toIdx)
  {
    return new ToMsgsIterator (messages, toIdx);
  }

  /** Returns a deep copy: same graph, duplicated message factors. */
  public MessageArray duplicate ()
  {
    MessageArray dup = new MessageArray (fg);
    dup.messages = deepCopy (messages);
    return dup;
  }

  public MIntInt2ObjectMap deepCopy (MIntInt2ObjectMap msgs)
  {
    MIntInt2ObjectMap copy = new MIntInt2ObjectMap (numV + numF);
    int[] keys1 = msgs.keys1 ();
    for (int i = 0; i < keys1.length; i++) {
      int k1 = keys1[i];
      ToMsgsIterator msgIt = new ToMsgsIterator (msgs, k1);
      while (msgIt.hasNext ()) {
        Factor msg = msgIt.next ();
        int from = msgIt.currentFromIdx ();
        copy.put (k1, from, msg.duplicate ());
      }
    }
    return copy;
  }

  /** Encodes a factor endpoint as a negative int: -(index + 1). */
  public int getIndex (Factor from)
  {
    return -(fg.getIndex (from) + 1);
  }

  /** Encodes a variable endpoint as its (nonnegative) graph index. */
  public int getIndex (Variable to)
  {
    return fg.getIndex (to);
  }

  /** Inverse of getIndex: decodes an endpoint index back to its object. */
  public Object idx2obj (int idx)
  {
    if (idx >= 0) {
      return fg.get (idx);
    } else {
      return fg.getFactor (-idx - 1);
    }
  }

  public void dump ()
  {
    dump (new PrintWriter (new OutputStreamWriter (System.out), true));
  }

  /** Prints every stored message with its endpoints, for debugging. */
  public void dump (PrintWriter out)
  {
    for (MessageArray.Iterator it = iterator (); it.hasNext ();) {
      Factor msg = (Factor) it.next ();
      Object from = it.from ();
      Object to = it.to ();
      out.println ("Message from " + from + " to " + to);
      out.println (msg.dumpToString ());
    }
  }

  /**
   * Iterates over all (from, to, message) triples in the array.
   * Call next() first; from()/to() report the endpoints of the message
   * most recently returned.
   * NOTE(review): this assumes every keys2 array is non-empty; an empty
   * per-key message set could make hasNext/increment misbehave -- confirm
   * that MIntInt2ObjectMap never exposes empty key1 entries.
   */
  public final class Iterator implements java.util.Iterator {

    int idx1 = 0;
    int idx2 = -1;   // -1 so the first increment() lands on element 0
    int[] keys1;
    int[] keys2;

    public Iterator ()
    {
      keys1 = messages.keys1 ();
      if (keys1.length > 0) {
        keys2 = messages.keys2 (keys1[idx1]);
      } else {
        keys2 = new int [0];
      }
    }

    private void increment ()
    {
      idx2++;
      if (idx2 >= keys2.length) {
        // advance to the next destination endpoint
        idx2 = 0;
        idx1++;
        keys2 = messages.keys2 (keys1[idx1]);
      }
    }

    public boolean hasNext ()
    {
      return (idx1+1 < keys1.length) || (idx2+1 < keys2.length);
    }

    public Object next ()
    {
      increment ();
      return messages.get (keys1[idx1], keys2[idx2]);
    }

    public void remove ()
    {
      throw new UnsupportedOperationException ();
    }

    /** Sender of the message last returned by next(). */
    public Object from ()
    {
      return idx2obj (keys2[idx2]);
    }

    /** Recipient of the message last returned by next(). */
    public Object to ()
    {
      return idx2obj (keys1[idx1]);
    }
  }

  /**
   * Iterates over all messages sent to a single endpoint (fixed toIdx).
   * Call next() first; currentFromIdx()/currentMessage() describe the
   * message most recently advanced to.
   */
  final public static class ToMsgsIterator {

    private TIntObjectIterator subIt;
    private int toIdx = -1;

    private ToMsgsIterator (MIntInt2ObjectMap msgs, int toIdx)
    {
      this.toIdx = toIdx;
      subIt = msgs.curry (toIdx);
    }

    public boolean hasNext ()
    {
      return subIt.hasNext ();
    }

    public Factor next ()
    {
      subIt.advance ();
      return currentMessage ();
    }

    int currentFromIdx ()
    {
      return subIt.key ();
    }

    public Factor currentMessage ()
    {
      return (Factor) subIt.value ();
    }

    public int currentToIdx ()
    {
      return toIdx;
    }
  }

}
5,889
22.466135
77
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/ResidualBP.java
/* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference;

import java.util.Random;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;

import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.FactorGraph;
import cc.mallet.grmm.types.Variable;

/**
 * A dynamic BP schedule where
 * The loopy belief propagation algorithm for approximate inference in
 * general graphical models.
 * <p>
 * NOTE(review): the class javadoc above is truncated mid-sentence, and
 * despite the name, propagate() below uses a uniformly random sweep order
 * identical to LoopyBP, not a residual-based priority schedule -- confirm
 * whether residual scheduling was ever implemented here.
 *
 * Created: Wed Nov 5 19:30:15 2003
 *
 * @author <a href="mailto:[email protected]">Charles Sutton</a>
 * @version $Id: ResidualBP.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
public class ResidualBP extends AbstractBeliefPropagation {

  /** Default cap on the number of full message-passing sweeps. */
  public static final int DEFAULT_MAX_ITER = 1000;

  // Number of sweeps actually performed on the last computeMarginals call.
  // NOTE(review): this shadows/duplicates similar state in the superclass
  // used by LoopyBP -- confirm intended.
  transient private int iterUsed;
  // Maximum number of sweeps before giving up on convergence.
  private int maxIter;
  // Source of randomness for shuffling the message-passing order each sweep.
  private Random rand = new Random ();

  /** Returns the number of sweeps used by the last run, post-adjustment. */
  public int iterationsUsed () { return iterUsed; }

  public void setUseCaching (boolean useCaching) { this.useCaching = useCaching; }

  // Note this does not have the sophisticated Terminator interface
  //  that we've got in TRP.

  /** Creates a sum-product (marginal) inferencer with the default iteration limit. */
  public ResidualBP () { this (new SumProductMessageStrategy (), ResidualBP.DEFAULT_MAX_ITER); }

  /** Creates a sum-product (marginal) inferencer with the given iteration limit. */
  public ResidualBP (int maxIter) { this (new SumProductMessageStrategy (), maxIter); }

  /** Creates an inferencer with an explicit message strategy and limit. */
  public ResidualBP (MessageStrategy messager, int maxIter)
  {
    super (messager);
    this.maxIter = maxIter;
  }

  /** Factory for a max-product (MAP) variant with the default iteration limit. */
  public static Inferencer createForMaxProduct ()
  {
    return new ResidualBP (new MaxProductMessageStrategy (), ResidualBP.DEFAULT_MAX_ITER);
  }

  /** Sets the RNG used to randomize message order; returns this for chaining. */
  public ResidualBP setRand (Random rand) { this.rand = rand; return this; }

  /**
   * Runs BP on the given graph: repeated randomized sweeps until the
   * messages converge or maxIter sweeps have been performed.
   */
  public void computeMarginals (FactorGraph mdl)
  {
    super.initForGraph (mdl);

    int iter;
    for (iter = 0; iter < maxIter; iter++) {
      logger.finer ("***AsyncLoopyBP iteration "+iter);
      propagate (mdl);
      if (hasConverged ()) break;
      // Snapshot messages so the next sweep's convergence test has a baseline.
      copyOldMessages ();
    }
    iterUsed = iter;
    if (iter >= maxIter) {
      logger.info ("***Loopy BP quitting: not converged after "+maxIter+" iterations.");
    } else {
      iterUsed++; // there's an off-by-one b/c of location of above break
      logger.info ("***AsyncLoopyBP converged: "+iterUsed+" iterations");
    }

    doneWithGraph (mdl);
  }

  /**
   * One full sweep: first all variable-to-factor messages, then all
   * factor-to-variable messages, with factors visited in random order.
   */
  private void propagate (FactorGraph mdl)
  {
    // Send all messages in random order.
    ArrayList factors = new ArrayList (mdl.factors());
    Collections.shuffle (factors, rand);

    for (Iterator it = factors.iterator(); it.hasNext();) {
      Factor factor = (Factor) it.next();
      for (Iterator vit = factor.varSet ().iterator (); vit.hasNext ();) {
        Variable from = (Variable) vit.next ();
        sendMessage (mdl, from, factor);
      }
    }

    for (Iterator it = factors.iterator(); it.hasNext();) {
      Factor factor = (Factor) it.next();
      for (Iterator vit = factor.varSet ().iterator (); vit.hasNext ();) {
        Variable to = (Variable) vit.next ();
        sendMessage (mdl, factor, to);
      }
    }
  }

  // Serialization
  private static final long serialVersionUID = 1;

  // If seralization-incompatible changes are made to these classes,
  //  then smarts can be added to these methods for backward compatibility.
  private void writeObject (ObjectOutputStream out) throws IOException
  {
    out.defaultWriteObject ();
  }

  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
  {
    in.defaultReadObject ();
  }

}
3,923
29.897638
96
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/Sampler.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference;

import java.util.List;

import cc.mallet.grmm.types.Assignment;
import cc.mallet.grmm.types.FactorGraph;
import cc.mallet.util.Randoms;

/**
 * Interface for methods from sampling the distribution given by a graphical
 * model.
 *
 * Created: Mar 28, 2005
 *
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
 * @version $Id: Sampler.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
public interface Sampler {

  /**
   * Samples from the distribution of a given undirected model.
   * @param mdl Model to sample from
   * @param N Number of samples to generate
   * @return A list of assignments to the model.
   *  (NOTE(review): the return type is a single Assignment; presumably one
   *  Assignment object holds all N sampled rows -- confirm with implementations.)
   */
  public Assignment sample (FactorGraph mdl, int N);

  /**
   * Sets the random seed used by this sampler.
   * @param r Random object to be used by this sampler.
   */
  public void setRandom (Randoms r);

}
1,309
30.190476
76
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/Inferencer.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference;

import java.io.Serializable;

import cc.mallet.grmm.types.*;

/**
 * Interface implemented by all inferencers, which are algorithms for
 * computing (perhaps approximately) marginal distributions over
 * nodes in the model.
 * <P>
 * If you are implementing a new inferencer, you may wish to consider
 * subclassing {@link cc.mallet.grmm.inference.AbstractInferencer}, which implements this
 * interface.
 * <p>
 * Created: Wed Oct 1 11:18:09 2003
 *
 * @author <a href="mailto:[email protected]">Charles Sutton</a>
 * @version $Id: Inferencer.java,v 1.1 2007/10/22 21:37:49 mccallum Exp $
 */
public interface Inferencer extends Serializable {

  /**
   * Computes marginal distributions for a factor graph.
   * @throws UnsupportedOperationException If this inferencer does
   *   not support undirected models (unlikely).
   */
  public void computeMarginals (FactorGraph mdl);

  /**
   * Returns the computed marginal of a given variable.
   * Before using this method, <tt>computeMarginals</tt> must have
   * been previously called on the graphical model that contains <tt>v</tt>.
   * @see #computeMarginals(FactorGraph)
   */
  public Factor lookupMarginal (Variable v);

  /**
   * Returns the computed marginal of a given clique in a graph.
   * Before using this method, <tt>computeMarginals</tt> must have
   * been previously called on the graphical model that contains the clique.
   *
   * @see #computeMarginals(cc.mallet.grmm.types.FactorGraph)
   * @see #computeMarginals(JunctionTree)
   * @throws UnsupportedOperationException If this inferencer does
   *   not compute marginals for the size of clique given.
   */
  public Factor lookupMarginal (VarSet varSet);

  /**
   * Returns the joint probability of a given assignment,
   * computed in some factorized fashion.
   * Before using this method, <tt>computeMarginals</tt> must have
   * been previously called on the graphical model that contains
   * the variables of <tt>assn</tt>.
   * @see #computeMarginals(cc.mallet.grmm.types.FactorGraph)
   * @see #computeMarginals(JunctionTree)
   */
  public double lookupJoint (Assignment assn);

  /**
   * Returns the natural logarithm of the joint probability
   * of a given assignment, computed in some factorized fashion.
   * Before using this method, <tt>computeMarginals</tt> must have
   * been previously called on the graphical model that contains
   * the variables of <tt>assn</tt>.
   * <P>
   * This method is less likely to underflow than
   * <code>Math.log (lookupJoint (assn))</code>.
   * @see #computeMarginals(cc.mallet.grmm.types.FactorGraph)
   * @see #computeMarginals(JunctionTree)
   */
  public double lookupLogJoint (Assignment assn);

  /**
   * Computes the marginal probability of a given assignment to
   * a small number of model variables. This may require one
   * run of computeMarginals() for each variable in the assignment;
   * if the assigment has many variables, it may be more efficient
   * to use lookupJoint.
   */
  public double query (FactorGraph mdl, Assignment assn);

  /**
   * Returns a copy of this inferencer.  (Presumably the copy's state is
   * independent of this one's, e.g. for use on another model -- confirm
   * with implementations.)
   */
  public Inferencer duplicate ();

  /** Debugging aid: prints internal state; format is implementation-defined. */
  void dump ();

  /** Outputs some measure of the total time spent in this inferencer. */
  void reportTime ();

} // Inferencer
3,636
34.656863
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/SparseMessageSender.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference.gbp;

import java.util.Iterator;

import cc.mallet.grmm.types.*;

/**
 * A GBP message strategy that sparsifies ("prunes") messages sent to leaf
 * regions by retaining only enough entries to cover (1 - epsilon) of the
 * probability mass.
 *
 * Created: Jun 1, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: SparseMessageSender.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
public class SparseMessageSender extends AbstractMessageStrategy {

  // Fraction of probability mass that may be discarded when pruning.
  private double epsilon;

  public SparseMessageSender (double epsilon)
  {
    this.epsilon = epsilon;
  }

  /**
   * Computes the message along edge, prunes it if the destination is a
   * leaf region, and stores it in newMessages (inherited).
   */
  public void sendMessage (RegionEdge edge)
  {
    // Product of incoming messages times this edge's outgoing factors.
    Factor product = msgProduct (edge);
    for (Iterator it = edge.factorsToSend.iterator (); it.hasNext ();) {
      Factor ptl = (Factor) it.next ();
      product.multiplyBy (ptl);
    }

    TableFactor result = (TableFactor) product.marginalize (edge.to.vars);
    result.normalize ();

    TableFactor pruned;
    if (shouldPruneMessage (edge, result)) {
//    if (edge.to.vars.size() > 1) {
      // Keep only the high-mass entries, then renormalize.
      pruned = Factors.retainMass (result, epsilon);
      pruned.normalize();
//      System.err.println ("Potential pruning.\nPRE:"+result+"\nPOST:"+pruned);
    } else {
      // Only prune messages to leaves
      pruned = result;
//      System.err.println ("Message for edge "+edge+" not pruned.");
    }

    newMessages.setMessage (edge.from, edge.to, pruned);
  }

  /**
   * Blends two message arrays (old and new) with the given inertia weight,
   * pruning the averaged messages where appropriate, and prints a
   * sparsity summary to stdout.
   */
  public MessageArray averageMessages (RegionGraph rg, MessageArray a1, MessageArray a2, double inertiaWeight)
  {
    MessageArray arr = new MessageArray (rg);
    for (Iterator it = rg.edgeIterator (); it.hasNext ();) {
      RegionEdge edge = (RegionEdge) it.next ();
      Factor msg1 = a1.getMessage (edge.from, edge.to);
      Factor msg2 = a2.getMessage (edge.from, edge.to);
      if (msg1 != null) {
        TableFactor averaged = (TableFactor) Factors.average (msg1, msg2, inertiaWeight);
        TableFactor pruned;
        if (shouldPruneMessage (edge, averaged)) {
          pruned = Factors.retainMass (averaged, epsilon);
        } else {
          pruned = averaged;
        }
        arr.setMessage (edge.from, edge.to, pruned);
      }
    }

    // compute amount of sparsity
    // NOTE(review): edges whose msg1 was null above have no message in arr;
    // getMessage here would then return null and NPE -- confirm that every
    // edge is guaranteed a message by the time this runs.
    int locs = 0;
    int idxs = 0;
    for (Iterator it = rg.edgeIterator (); it.hasNext ();) {
      RegionEdge edge = (RegionEdge) it.next ();
      DiscreteFactor msg = arr.getMessage (edge.from, edge.to);
      locs += msg.numLocations ();
      idxs += new HashVarSet (msg.varSet ()).weight ();
    }
    System.out.println ("Sparsity quotient = "+locs+" of "+idxs);

    return arr;
  }

  // A message is pruned only if its destination region has no children (leaf).
  private boolean shouldPruneMessage (RegionEdge edge, Factor msg)
  {
    return edge.to.children.isEmpty ();
  }

}
3,017
31.106383
110
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/BPRegionGenerator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference.gbp;

import java.util.Iterator;

import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.FactorGraph;
import cc.mallet.grmm.types.Variable;

/**
 * Builds the region graph that makes GBP equivalent to ordinary BP:
 * one parent region per multi-variable factor, with a child region for
 * each of its variables.
 *
 * Created: May 30, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: BPRegionGenerator.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
public class BPRegionGenerator implements RegionGraphGenerator {

  public RegionGraph constructRegionGraph (FactorGraph mdl)
  {
    RegionGraph rg = new RegionGraph ();
    for (Iterator it = mdl.factorsIterator (); it.hasNext();) {
      Factor ptl = (Factor) it.next ();
      if (ptl.varSet ().size() == 1) continue; // Single-node potentials handled separately

      // Parent region for this multi-variable factor.
      Region parent = new Region (ptl);

      // Now add appropriate edges to region graph
      for (Iterator childIt = ptl.varSet().iterator (); childIt.hasNext();) {
        Variable var = (Variable) childIt.next ();
        Factor childPtl = mdl.factorOf (var);
        // NOTE(review): if factorOf(var) returns null (variable with no
        // single-node potential), findRegion dereferences it -- presumably
        // every variable is assumed to have a unary factor; confirm.
        Region child = rg.findRegion (childPtl, true);

        //add node potential to parent if necessary
        if (childPtl != null) {
          parent.addFactor (childPtl);
          child.addFactor (childPtl);
        }

        rg.add (parent, child);
      }
    }

    // Precompute descendant sets, counting numbers, etc. for inference.
    rg.computeInferenceCaches ();

    return rg;
  }
}
1,761
31.036364
92
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/RegionGraphGenerator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference.gbp;

import cc.mallet.grmm.types.FactorGraph;

/**
 * Interface for strategies that construct region graphs from arbitrary graphical models.
 * They choose both which factors should be grouped into a region, and what the connectivity
 * between regions should be.
 *
 * Created: May 27, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: RegionGraphGenerator.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
public interface RegionGraphGenerator {

  /**
   * Construct a region graph from an artbitrary model.
   * @param mdl Undirected Model to construct region graph from.
   */
  RegionGraph constructRegionGraph (FactorGraph mdl);

}
1,142
38.413793
93
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/Region.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference.gbp;

import java.util.*;

import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.Variable;
import gnu.trove.THashSet;

/**
 * A node in a region graph: a set of variables together with the factors
 * defined over them, plus parent/child links and cached bookkeeping used
 * by generalized belief propagation.
 *
 * Created: May 27, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: Region.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
class Region {

  // Collection of discrete potentials giving the factors of this region.
  Set factors;

  List parents;   // List of parent regions
  List children;  // List of child regions
  List vars;      // All variables in region

  int index;      // Position in the owning RegionGraph; -1 until added
  boolean isRoot; // True until some parent links to this region

  int countingNumber;

  // cache for computing message passing strategy
  Set descendants;

  /** Shared initialization: empty links, root status, unassigned index. */
  private Region ()
  {
    children = new ArrayList (0);
    parents = new ArrayList (0);
    isRoot = true;
    index = -1;
  }

  /** Region over a single variable, with no factors yet. */
  Region (Variable v)
  {
    this ();
    factors = new THashSet ();
    vars = new ArrayList (1);
    vars.add (v);
  }

  /** Region over one factor's variables, containing that factor. */
  Region (Factor f)
  {
    this ();
    factors = new THashSet ();
    factors.add (f);
    vars = new ArrayList (f.varSet ());
  }

  /** Region over explicit arrays of variables and factors. */
  Region (Variable[] varArr, Factor[] factorArr)
  {
    this ();
    this.factors = new THashSet (Arrays.asList (factorArr));
    this.vars = new ArrayList (Arrays.asList (varArr));
  }

  /** Region over explicit collections of variables and factors. */
  Region (Collection varColl, Collection factorColl)
  {
    this ();
    this.factors = new THashSet (factorColl);
    this.vars = new ArrayList (varColl);
  }

  /** Region over a collection of variables, with no factors yet. */
  Region (Collection varColl)
  {
    this ();
    this.vars = new ArrayList (varColl);
    factors = new THashSet ();
  }

  /** Adds a factor to this region if not already present. */
  void addFactor (Factor f)
  {
    if (!factors.contains (f)) {
      factors.add (f);
    }
  }

  // NOTE: equals/hashCode are deliberately NOT overridden -- regions use
  // identity semantics.  (A value-based implementation existed here once
  // and was removed as a mistake.)

  /** Renders the region's variables and a factor count; factors themselves are omitted. */
  public String toString ()
  {
    StringBuffer buf = new StringBuffer ();
    buf.append ("REGION[");
    int n = vars.size ();
    for (int i = 0; i < n; i++) {
      buf.append (vars.get (i));
      if (i + 1 < n) buf.append (" ");
    }
    buf.append ("] nf:");
    buf.append (factors.size());
    return buf.toString ();
  }

}
3,072
23.007813
98
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/FactorizedRegion.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference.gbp;

import java.util.List;
import java.util.Iterator;
import java.util.Collection;
import java.util.Set;

import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.FactorGraph;
import cc.mallet.grmm.types.Variable;
import gnu.trove.THashSet;

/**
 * A more space-efficient Region class that doesn't maintain a global factor
 *  over all assignments to the region.  Instead, the region's factors are
 *  kept in a sub-FactorGraph over the region's variables.
 *
 * Created: Jun 3, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: FactorizedRegion.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
public class FactorizedRegion extends Region {

  // Factor graph over this region's variables containing all its factors.
  FactorGraph subMdl;

  public FactorizedRegion (List factors)
  {
    super (varsForFactors (factors), factors);
    subMdl = new FactorGraph ((Variable[]) vars.toArray (new Variable[0]));
    for (Object o : factors) {
      subMdl.addFactor ((Factor) o);
    }
  }

  /** Returns the union of the variable sets of all given factors. */
  private static Collection varsForFactors (List factors)
  {
    Set union = new THashSet ();
    for (Object o : factors) {
      Factor f = (Factor) o;
      union.addAll (f.varSet ());
    }
    return union;
  }

}
1,663
29.254545
79
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/RegionEdge.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.grmm.inference.gbp;

import java.util.List;
import java.util.Set;
import java.util.ArrayList;

/**
 * A directed parent-to-child edge in a region graph, with cached sets used
 * by the GBP message-update equations.  Value equality is defined by the
 * (from, to) pair so edges can be looked up in collections.
 *
 * Created: May 30, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: RegionEdge.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
class RegionEdge {

  Region from;  // parent region
  Region to;    // child region

  // List of factors in the parent region that are not in the child region
  List factorsToSend;

  // E(P)\E(R) in Yedidia notation.  Note that this includes parent node.
  Set cousins;

  // N(from,to) in Yedida 2004 TR notation
  List neighboringParents;

  // D(from,to) in Yedida 2004 TR notation
  List loopingMessages;

  public RegionEdge (Region from, Region to)
  {
    this.from = from;
    this.to = to;
  }

  // Equality on the (from, to) endpoint pair; consistent with hashCode below.
  public boolean equals (Object o)
  {
    if (this == o) return true;
    if (!(o instanceof RegionEdge)) return false;

    final RegionEdge regionEdge = (RegionEdge) o;

    if (from != null ? !from.equals (regionEdge.from) : regionEdge.from != null) return false;
    if (to != null ? !to.equals (regionEdge.to) : regionEdge.to != null) return false;

    return true;
  }

  public int hashCode ()
  {
    int result;
    result = (from != null ? from.hashCode () : 0);
    result = 29 * result + (to != null ? to.hashCode () : 0);
    return result;
  }

  // Populates factorsToSend with the parent's factors minus the child's.
  void initializeFactorsToSend ()
  {
    factorsToSend = new ArrayList (from.factors);
    factorsToSend.removeAll (to.factors);
  }

  public String toString ()
  {
    return "EDGE:["+from+"-->"+to+"]";
  }

}
1,958
25.472973
94
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/RegionGraph.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.inference.gbp;

import gnu.trove.THashSet;

import java.util.*;

import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.VarSet;
import cc.mallet.grmm.types.Variable;

/**
 * A directed acyclic graph of {@link Region}s used by generalized belief
 * propagation.  Edges always point from a (larger) parent region to one of
 * its (smaller) child subregions.  After construction, callers must invoke
 * {@link #computeInferenceCaches()} once to populate the per-region and
 * per-edge caches (descendants, counting numbers, cousins, ...) that the
 * message-passing code relies on.
 *
 * Created: May 27, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: RegionGraph.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
class RegionGraph {

  // All regions in the graph.  A region's index field is its insertion order.
  private Set regions = new THashSet ();
  // All parent->child edges, in insertion order (findEdge searches this list).
  private List edges = new ArrayList ();

  public RegionGraph ()
  {
  }

  /**
   * Adds a parent->child edge, adding both regions to the graph if
   * necessary.  A duplicate edge is silently ignored.
   */
  void add (Region parent, Region child)
  {
    if (!isConnected (parent, child)) {
      addRegion (parent);
      addRegion (child);

      child.isRoot = false;

      if (parent.children == null) parent.children = new ArrayList ();
      parent.children.add (child);

      if (child.parents == null) child.parents = new ArrayList ();
      child.parents.add (parent);

      edges.add (new RegionEdge (parent, child));
    }
  }

  /** Returns true if a parent->child edge already exists. */
  private boolean isConnected (Region parent, Region child)
  {
    // Guard against null: add() initializes parent.children lazily *after*
    // calling this method, so a brand-new parent may not have a child list yet.
    return (parent.children != null) && (parent.children.contains (child));
  }

  /** Adds a region (no-op if already present) and assigns its index. */
  private void addRegion (Region region)
  {
    if (regions.add (region)) {
      if (region.index != -1) {
        throw new IllegalArgumentException ("Region "+region+" has already been added to a different region graph.");
      }
      region.index = regions.size() - 1;
    }
  }

  int size () { return regions.size (); }

  Iterator iterator () { return regions.iterator (); }

  Iterator edgeIterator () { return edges.iterator (); }

  /**
   * Populates all cached, derived information used during inference.
   * Must be called exactly once, after all edges have been added.
   */
  public void computeInferenceCaches ()
  {
    computeDescendants ();
    includeDescendantFactors ();
    computeFactorsToSend ();
    computeCountingNumbers ();
    computeCousins ();
    computeNeighboringParents ();
    computeLoopingMessages ();
    // todo: Compute D(P,R) as well
  }

  /** Ensures every region's factor set also contains its descendants' factors. */
  private void includeDescendantFactors ()
  {
    // Slightly inefficient: A recursive soln would be more efficient
    for (Iterator it = iterator (); it.hasNext();) {
      Region region = (Region) it.next ();
      for (Iterator dIt = region.descendants.iterator (); dIt.hasNext ();) {
        Region descendant = (Region) dIt.next ();
        // factors is a set, so it avoids duplicates
        region.factors.addAll (descendant.factors);
      }
    }
  }

  /**
   * For each edge, caches the edges whose messages appear in the denominator
   * of the message update (messages from cousins into the target's subtree).
   */
  private void computeLoopingMessages ()
  {
    for (Iterator it = edgeIterator (); it.hasNext();) {
      RegionEdge edge = (RegionEdge) it.next ();

      Region to = edge.to;
      List result = new ArrayList ();
      for (Iterator cousinIt = edge.cousins.iterator (); cousinIt.hasNext ();) {
        Region cousin = (Region) cousinIt.next ();
        if (cousin == edge.from) continue;
        for (Iterator edgeIt = cousin.children.iterator (); edgeIt.hasNext();) {
          Region cousinChild = (Region) edgeIt.next ();
          if (cousinChild == to || to.descendants.contains (cousinChild)) {
            result.add (findEdge (cousin, cousinChild));
          }
        }
      }

      edge.loopingMessages = result;
    }
  }

  // computes region graph counting numbers as defined in Yedidia et al.
  // NOTE(review): this recursion uses only *direct parents*
  // (c_R = 1 - sum of parents' numbers), whereas Yedidia et al. define
  // c_R = 1 - sum over all *ancestors*.  These coincide for two-level
  // (BP-style) graphs; confirm intent before using deeper region graphs.
  private void computeCountingNumbers ()
  {
    LinkedList queue = new LinkedList ();
    for (Iterator it = regions.iterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      if (region.isRoot) queue.add (region);
    }

    while (!queue.isEmpty()) {
      Region region = (Region) queue.removeFirst ();
      int parentCnt = 0;
      for (Iterator it = region.parents.iterator (); it.hasNext ();) {
        Region parent = (Region) it.next ();
        parentCnt += parent.countingNumber;
      }
      region.countingNumber = 1 - parentCnt;
      // Children may be enqueued multiple times; the last dequeue sees
      // all parents' final counting numbers, so the result converges.
      queue.addAll (region.children);
    }
  }

  private void computeFactorsToSend ()
  {
    for (Iterator it = edges.iterator (); it.hasNext ();) {
      RegionEdge edge = (RegionEdge) it.next ();
      edge.initializeFactorsToSend ();
    }
  }

  /**
   * For each edge, caches the "cousins": regions that descend from the
   * edge's source but not from (or equal to) the target, plus the source.
   */
  private void computeCousins ()
  {
    for (Iterator it = edgeIterator (); it.hasNext();) {
      RegionEdge edge = (RegionEdge) it.next ();
      Set cousins = new THashSet (edge.from.descendants);
      cousins.removeAll (edge.to.descendants);
      cousins.remove (edge.to);
      cousins.add (edge.from);
      edge.cousins = cousins;
    }
  }

  private void computeDescendants ()
  {
    for (Iterator it = regions.iterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      if (region.isRoot) {
        computeDescendantsRec (region);
      }
    }
  }

  private void computeDescendantsRec (Region region)
  {
    Set descendants = new THashSet (region.children.size ());
    // all region graphs are DAGs, so no infinite regress
    for (Iterator it = region.children.iterator (); it.hasNext();) {
      Region child = (Region) it.next ();
      computeDescendantsRec (child);
      descendants.add (child);
      descendants.addAll (child.descendants);
    }
    region.descendants = descendants;
  }

  /**
   * For each edge, caches the edges from "uncles" (regions outside the
   * source's subtree) into cousins of the edge.
   */
  private void computeNeighboringParents ()
  {
    for (Iterator it = edgeIterator (); it.hasNext();) {
      RegionEdge edge = (RegionEdge) it.next ();
      edge.neighboringParents = new ArrayList ();

      List l = new LinkedList (regions);
      l.removeAll (edge.from.descendants);
      l.remove (edge.from);

      for (Iterator uncleIt = l.iterator (); uncleIt.hasNext ();) {
        Region uncle = (Region) uncleIt.next ();
        for (Iterator childIt = uncle.children.iterator (); childIt.hasNext();) {
          Region cousin = (Region) childIt.next ();
          if (edge.cousins.contains (cousin)) {
            edge.neighboringParents.add (findEdge (uncle, cousin));
          }
        }
      }
    }
  }

  // horrifically inefficient (linear scan of the edge list)
  private RegionEdge findEdge (Region uncle, Region cousin)
  {
    int idx = edges.indexOf (new RegionEdge (uncle, cousin));
    return (RegionEdge) edges.get (idx);
  }

  public String toString ()
  {
    StringBuffer buf = new StringBuffer ();
    buf.append ("REGION GRAPH\nRegions:\n");
    for (Iterator it = regions.iterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      buf.append ("\n  ");
      buf.append (region);
    }
    buf.append ("\nEdges:");
    for (Iterator it = edges.iterator (); it.hasNext ();) {
      RegionEdge edge = (RegionEdge) it.next ();
      buf.append ("\n  ");
      buf.append (edge.from);
      buf.append (" --> ");
      buf.append (edge.to);
    }
    buf.append ("\n");
    return buf.toString ();
  }

  public boolean contains (Region region)
  {
    return regions.contains (region);
  }

  /** Returns the region in this graph whose factor list contains only
   *   a given potential.
   * @param ptl
   * @param doCreate If true, an appropriate region will be created and added
   *   to graph if none is found.
   * @return A region, or null if no region found and doCreate false.
   */
  public Region findRegion (Factor ptl, boolean doCreate)
  {
    Set allVars = ptl.varSet ();
    for (Iterator it = regions.iterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      if (region.vars.size() == allVars.size() && region.vars.containsAll (allVars))
        return region;
    }

    if (doCreate) {
      Region region = new Region (ptl);
      addRegion (region);
      return region;
    } else {
      return null;
    }
  }

  /** Returns the region in this graph whose variable list contains only
   *   a given variable.
   * @param var
   * @param doCreate If true, an appropriate region will be created and added
   *   to graph if none is found.
   * @return A region, or null if no region found and doCreate false.
   */
  public Region findRegion (Variable var, boolean doCreate)
  {
    for (Iterator it = regions.iterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      if ((region.vars.size() == 1) && (region.vars.contains (var))) {
        return region;
      }
    }

    if (doCreate) {
      Region region = new Region (var);
      addRegion (region);
      return region;
    } else {
      return null;
    }
  }

  /** Finds the smallest region containing a given variable.
   *   This might return a region that contains many extraneous variables.
   * @param variable
   * @return
   */
  public Region findContainingRegion (Variable variable)
  {
    Region ret = null;
    for (Iterator it = regions.iterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      if (region.vars.contains (variable)) {
        if (ret == null || region.vars.size() < ret.vars.size ())
          ret = region;
      }
    }
    return ret;
  }

  /** Finds the smallest region containing all the variables in a given set.
   *   This might return a region that contains many extraneous variables.
   * @param varSet
   * @return
   */
  public Region findContainingRegion (VarSet varSet)
  {
    Region ret = null;
    for (Iterator it = regions.iterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      if (region.vars.containsAll (varSet)) {
        if (ret == null || region.vars.size() < ret.vars.size ())
          ret = region;
      }
    }
    return ret;
  }

  public int numEdges () { return edges.size (); }
}
9,646
27.290323
117
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/MessageStrategy.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.inference.gbp;

/**
 * Strategy for computing and storing the parent-to-child messages of a
 * generalized-belief-propagation pass over a {@link RegionGraph}.
 * Implementations read messages from an "old" {@link MessageArray} and write
 * updated messages into a "new" one.
 *
 * Created: May 29, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: MessageStrategy.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
public interface MessageStrategy {

  /** Computes the message along the given parent->child edge and stores it
   *  in the new-message array. */
  void sendMessage (RegionEdge edge);

  /** Sets the arrays messages are read from (old) and written to (new).
   *  Must be called before any sendMessage. */
  void setMessageArray (MessageArray oldMessages, MessageArray newMessages);

  MessageArray getOldMessages ();

  MessageArray getNewMessages ();

  /** Returns a message array that blends old and new messages, weighting the
   *  old messages by {@code weight} (used for damping/inertia). */
  MessageArray averageMessages (RegionGraph rg, MessageArray oldMessages, MessageArray newMessages, double weight);

}
985
38.44
115
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/Kikuchi4SquareRegionGenerator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.inference.gbp;

import java.util.Iterator;

import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.FactorGraph;
import cc.mallet.grmm.types.UndirectedGrid;
import cc.mallet.grmm.types.Variable;
import cc.mallet.util.ArrayUtils;

/**
 * Builds a three-level Kikuchi region graph for a 2-D grid model:
 * top-level regions are the 2x2 squares of the grid, mid-level regions are
 * the pairwise edge factors, and bottom-level regions are single variables.
 * Only works on {@link UndirectedGrid} models.
 *
 * Created: May 31, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: Kikuchi4SquareRegionGenerator.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
public class Kikuchi4SquareRegionGenerator implements RegionGraphGenerator {

  public RegionGraph constructRegionGraph (FactorGraph mdl)
  {
    if (mdl instanceof UndirectedGrid) {
      RegionGraph rg = new RegionGraph ();
      UndirectedGrid grid = (UndirectedGrid) mdl;

      // First set up regions for all
      // 2x2 squares; (x,y) is the square's upper-left corner.
      for (int x = 0; x < grid.getWidth () - 1; x++) {
        for (int y = 0; y < grid.getHeight () - 1; y++) {
          // The four corner variables of the square, clockwise.
          Variable[] vars = new Variable[] {
                  grid.get (x, y),
                  grid.get (x+1, y),
                  grid.get (x+1, y+1),
                  grid.get (x, y+1),
          };
          // The four pairwise factors along the square's sides.
          Factor[] edges = new Factor[] {
                  mdl.factorOf (vars[0], vars[1]),
                  mdl.factorOf (vars[1], vars[2]),
                  mdl.factorOf (vars[2], vars[3]),
                  mdl.factorOf (vars[0], vars[3]),
          };

          // Create region for 4-clique
          Region fourSquare = new Region (vars, edges);

          // Create 1-clique region
          // (fold any unary factors on the corners into the square region)
          for (int i = 0; i < 4; i++) {
            Variable var = vars[i];
            Factor ptl = mdl.factorOf (var);
            if (ptl != null) {
              fourSquare.factors.add (ptl);
            }
          }

          // Finally create edge regions, and connect to everyone else
          for (int i = 0; i < 4; i++) {
            Factor edgePtl = edges[i];
            Region edgeRgn = rg.findRegion (edgePtl, true);
            rg.add (fourSquare, edgeRgn);

            // Link the edge region to single-variable regions for both
            // endpoints, copying up any unary factors.
            Variable v1 = (Variable) edgeRgn.vars.get (0);
            Region nodeRgn = createVarRegion (rg, mdl, v1);
            edgeRgn.factors.addAll (nodeRgn.factors);
            rg.add (edgeRgn, nodeRgn);

            Variable v2 = (Variable) edgeRgn.vars.get (1);
            nodeRgn = createVarRegion (rg, mdl, v2);
            edgeRgn.factors.addAll (nodeRgn.factors);
            rg.add (edgeRgn, nodeRgn);
          }
        }
      }

      rg.computeInferenceCaches ();
      return rg;

    } else {
      throw new UnsupportedOperationException ("Kikuchi4SquareRegionGenerator requires that you use UndirectedGrid.");
    }
  }

  // Returns (creating if necessary) the single-variable region for v1,
  // keyed by its unary factor if one exists, else by the variable itself.
  private Region createVarRegion (RegionGraph rg, FactorGraph mdl, Variable v1)
  {
    Factor ptl = mdl.factorOf (v1);
    if (ptl == null) {
      return rg.findRegion (v1, true);
    } else {
      return rg.findRegion (ptl, true);
    }
  }

  // Debugging check: every size-1 region must be one of the known node regions.
  private void checkAllSingles (RegionGraph rg, Region[] nodeRegions)
  {
    for (Iterator it = rg.iterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      if (region.vars.size() == 1) {
        if (ArrayUtils.indexOf (nodeRegions, region) < 0) {
          throw new IllegalStateException ("huh?");
        }
      }
    }
  }

  // Debugging check: there cannot be more pairwise regions than factors.
  private void checkTooManyDoubles (RegionGraph rg, FactorGraph mdl)
  {
    int nv = mdl.factors ().size ();
    int doubles = 0;
    for (Iterator it = rg.iterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      if (region.vars.size() == 2) doubles++;
    }
    if (doubles > nv) {
      throw new IllegalStateException ("huh? ");
    }
  }

}
3,937
30.007874
118
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/MessageArray.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.inference.gbp;

import java.util.Iterator;

import cc.mallet.grmm.types.*;

/**
 * Dense region-index-by-region-index storage for GBP messages.  Entry
 * {@code messages[i][j]} is the message from the region with index i to the
 * region with index j; entries for non-edges are null.
 *
 * Created: May 29, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: MessageArray.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
class MessageArray {

  private DiscreteFactor[][] messages;

  private MessageArray () {}

  public MessageArray (RegionGraph rg)
  {
    int size = rg.size ();
    // Allocate with the field's element type.  The original allocated
    // TableFactor[][], which risks an ArrayStoreException below if
    // LogTableFactor is not a TableFactor subtype.
    messages = new DiscreteFactor[size][size];
    for (Iterator it = rg.iterator (); it.hasNext ();) {
      Region from = (Region) it.next ();
      for (Iterator it2 = from.children.iterator(); it2.hasNext ();) {
        Region to = (Region) it2.next ();
        // Messages are initialized uniform (in log space).
        DiscreteFactor ptl = new LogTableFactor (to.vars);
//        ptl.normalize ();
        messages[from.index][to.index] = ptl;
      }
    }
  }

  public MessageArray (TableFactor[][] messages)
  {
    this.messages = messages;
  }

  DiscreteFactor getMessage (Region from, Region to)
  {
    return messages[from.index][to.index];
  }

  public void setMessage (Region from, Region to, TableFactor result)
  {
    messages[from.index][to.index] = result;
  }

  /** deep copy of messages */
  public MessageArray duplicate ()
  {
    MessageArray arr = new MessageArray ();
    // Use the general element type; a narrowing cast to TableFactor here
    // would fail for any non-TableFactor message (e.g. LogTableFactor).
    arr.messages = new DiscreteFactor[messages.length][messages.length];
    for (int i = 0; i < messages.length; i++) {
      for (int j = 0; j < messages[i].length; j++) {
        if (messages[i][j] != null) {
          arr.messages[i][j] = (DiscreteFactor) messages[i][j].duplicate ();
        }
      }
    }
    return arr;
  }

  public int size ()
  {
    return messages.length;
  }

  public Factor getMessage (int i, int j)
  {
    return messages[i][j];
  }

}
2,160
27.064935
76
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/ParentChildGBP.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.inference.gbp;

import java.util.logging.Logger;
import java.util.logging.Level;
import java.util.*;

import cc.mallet.grmm.inference.AbstractInferencer;
import cc.mallet.grmm.types.*;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.Timing;

/**
 * Generalized belief propagation using parent-to-child message passing over a
 * region graph.  The region graph is produced by a pluggable
 * {@link RegionGraphGenerator}; messages are computed by a pluggable
 * {@link MessageStrategy}.  Iteration stops on message convergence or after
 * MAX_ITER sweeps.
 *
 * Created: May 27, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: ParentChildGBP.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
public class ParentChildGBP extends AbstractInferencer {

  private static final Logger logger = MalletLogger.getLogger (ParentChildGBP.class.getName());
  private static final boolean debug = false;

  // Pluggable pieces: how regions are built, and how messages are computed.
  private RegionGraphGenerator regioner;
  private MessageStrategy sender;

  // If true, new messages are damped by averaging with the previous sweep.
  private boolean useInertia = true;
  private double inertiaWeight = 0.5;

  // convergence criteria
  private static final double THRESHOLD = 1e-3;
  private static final int MAX_ITER = 500;

  // current inferencing state (valid after computeMarginals)
  private MessageArray oldMessages;
  private MessageArray newMessages;
  private RegionGraph rg;
  private FactorGraph mdl;

  // Used only by the static factory methods below.
  private ParentChildGBP ()
  {
  }

  public ParentChildGBP (RegionGraphGenerator regioner)
  {
    this (regioner, new FullMessageStrategy ());
  }

  public ParentChildGBP (RegionGraphGenerator regioner, MessageStrategy sender)
  {
    this.regioner = regioner;
    this.sender = sender;
  }

  /** Returns a GBP inferencer whose region graph reduces to ordinary BP. */
  public static ParentChildGBP makeBPInferencer ()
  {
    ParentChildGBP inferencer = new ParentChildGBP ();
    inferencer.regioner = new BPRegionGenerator ();
    inferencer.sender = new FullMessageStrategy ();
    return inferencer;
  }

  /** Returns a GBP inferencer using 2x2-square Kikuchi regions (grids only). */
  public static ParentChildGBP makeKikuchiInferencer ()
  {
    ParentChildGBP inferencer = new ParentChildGBP ();
    inferencer.regioner = new Kikuchi4SquareRegionGenerator ();
    inferencer.sender = new FullMessageStrategy ();
    return inferencer;
  }

  // accessors

  public boolean getUseInertia ()
  {
    return useInertia;
  }

  public void setUseInertia (boolean useInertia)
  {
    this.useInertia = useInertia;
  }

  public double getInertiaWeight ()
  {
    return inertiaWeight;
  }

  public void setInertiaWeight (double inertiaWeight)
  {
    this.inertiaWeight = inertiaWeight;
  }

  // inferencer interface

  public Factor lookupMarginal (Variable variable)
  {
    // Marginalize the belief of the smallest region containing the variable.
    Region region = rg.findContainingRegion (variable);
    if (region == null)
      throw new IllegalArgumentException ("Could not find region containing variable "+variable+" in region graph "+rg);

    Factor belief = computeBelief (region);
    Factor varBelief = belief.marginalize (variable);
    return varBelief;
  }

  public Factor lookupMarginal (VarSet varSet)
  {
    Region region = rg.findContainingRegion (varSet);
    if (region == null)
      throw new IllegalArgumentException ("Could not find region containing clique "+varSet +" in region graph "+rg);

    Factor belief = computeBelief (region);
    Factor cliqueBelief = belief.marginalize (varSet);
    return cliqueBelief;
  }

  private Factor computeBelief (Region region)
  {
    return computeBelief (region, newMessages);
  }

  /**
   * Region belief: product of the region's factors, the messages from its
   * parents, and the messages entering its descendants from outside the
   * region's subtree; normalized at the end.
   */
  static Factor computeBelief (Region region, MessageArray messages)
  {
    DiscreteFactor result = new LogTableFactor(region.vars);

    for (Iterator it = region.factors.iterator(); it.hasNext();) {
      Factor factor = (Factor) it.next();
      result.multiplyBy(factor);
    }
    for (Iterator it = region.parents.iterator(); it.hasNext();) {
      Region parent = (Region) it.next();
      Factor msg = messages.getMessage(parent, region);
      result.multiplyBy(msg);
    }
    for (Iterator it = region.descendants.iterator(); it.hasNext();) {
      Region child = (Region) it.next();
      for (Iterator it2 = child.parents.iterator(); it2.hasNext();) {
        Region uncle = (Region) it2.next();
        // only messages crossing into the subtree, not internal ones
        if (uncle != region && !region.descendants.contains(uncle)) {
          result.multiplyBy(messages.getMessage(uncle, child));
        }
      }
    }

    result.normalize();

    return result;
  }

  public double lookupLogJoint (Assignment assn)
  {
    double factorProduct = mdl.logValue (assn);
//    value += computeFreeEnergy (rg);
    double F = computeFreeEnergy (rg);
    double value = factorProduct + F;
    if (debug) System.err.println ("GBP factor product:"+factorProduct+" + free energy: "+F+" = value:"+value);
    return value;
  }

  /**
   * Region-based free energy: sum over regions of the counting number times
   * (average energy minus entropy) under the current beliefs.
   */
  private double computeFreeEnergy (RegionGraph rg)
  {
    double avgEnergy = 0;
    double entropy = 0;

    for (Iterator it = rg.iterator (); it.hasNext();) {
      Region region = (Region) it.next();
      Factor belief = computeBelief(region);
      double thisEntropy = belief.entropy();

      if (debug) System.err.println("Region " + region + " c:" + region.countingNumber + " entropy:" + thisEntropy);
      entropy += region.countingNumber * thisEntropy;

      // Energy term: expected negative log of the region's factor product.
      DiscreteFactor product = new LogTableFactor(belief.varSet());
      for (Iterator ptlIt = region.factors.iterator(); ptlIt.hasNext();) {
        Factor ptl = (Factor) ptlIt.next();
        product.multiplyBy(ptl);
      }

      double thisAvgEnergy = 0;
      for (AssignmentIterator assnIt = belief.assignmentIterator(); assnIt.hasNext();) {
        Assignment assn = assnIt.assignment();
        // Note: Do not use assnIt here before fixing variable ordering issues.
        double thisEnergy = -product.logValue(assn);
//        double thisEnergy = product.phi (assnIt);
        double thisBel = belief.value(assn);
        thisAvgEnergy += thisBel * thisEnergy;
        assnIt.advance();
      }

      if (debug) {
        System.err.println("Region " + region + " c:" + region.countingNumber + " avgEnergy: " + thisAvgEnergy);
        /*
        DiscretePotential b2 = belief.duplicate ();
        b2.delogify ();
        System.err.println ("BELIEF:"+b2);
        System.err.println ("ENERGY:"+product);
        */
      }

      avgEnergy += region.countingNumber * thisAvgEnergy;
    }

    if (debug) System.err.println ("GBP computeFreeEnergy: avgEnergy:"+avgEnergy+" entropy:"+entropy+" free energy:"+(avgEnergy-entropy));

//    return avgEnergy + entropy;
    return avgEnergy - entropy;
  }

  public void computeMarginals (FactorGraph mdl)
  {
    Timing timing = new Timing ();

    this.mdl = mdl;
    rg = regioner.constructRegionGraph (mdl);
    RegionEdge[] pairs = chooseMessageSendingOrder ();
    newMessages = new MessageArray (rg);

    timing.tick ("GBP Region Graph construction");

    int iter = 0;
    do {
      // Swap arrays: last sweep's results become this sweep's inputs.
      oldMessages = newMessages;
      newMessages = oldMessages.duplicate ();
      sender.setMessageArray (oldMessages, newMessages);

      for (int i = 0; i < pairs.length; i++) {
        RegionEdge edge = pairs[i];
        sender.sendMessage (edge);
      }

      if (logger.isLoggable (Level.FINER)) {
        timing.tick ("GBP iteration "+iter);
      }

      iter++;

      // Damping: blend new messages with the previous sweep's.
      if (useInertia)
        newMessages = sender.averageMessages (rg, oldMessages, newMessages, inertiaWeight);

    } while (!hasConverged () && (iter < MAX_ITER));

    logger.info ("GBP: Used "+iter+" iterations.");
    if (iter >= MAX_ITER) {
      logger.warning ("***WARNING: GBP not converged!");
    }
  }

  // Sort edges so that messages into smaller regions are sent first.
  // NOTE(review): Double.compare on two ints; Integer.compare would be
  // the conventional choice (result is identical).
  private RegionEdge[] chooseMessageSendingOrder ()
  {
    List l = new ArrayList ();
    for (Iterator it = rg.edgeIterator (); it.hasNext();) {
      RegionEdge edge = (RegionEdge) it.next ();
      l.add (edge);
    }
    Collections.sort (l, new Comparator () {
      public int compare (Object o1, Object o2)
      {
        RegionEdge e1 = (RegionEdge) o1;
        RegionEdge e2 = (RegionEdge) o2;
        int l1 = e1.to.vars.size();
        int l2 = e2.to.vars.size();
        return Double.compare (l1, l2);
      };
    });

    return (RegionEdge[]) l.toArray (new RegionEdge [l.size()]);
  }

  // Converged when every message changed by less than THRESHOLD.
  private boolean hasConverged ()
  {
    for (Iterator it = rg.edgeIterator (); it.hasNext();) {
      RegionEdge edge = (RegionEdge) it.next ();
      Factor oldMsg = oldMessages.getMessage (edge.from, edge.to);
      Factor newMsg = newMessages.getMessage (edge.from, edge.to);
      if (oldMsg == null) {
        assert newMsg == null;
      } else {
        if (!oldMsg.almostEquals (newMsg, THRESHOLD)) {
          /*
          //xxx debug
          if (sender instanceof SparseMessageSender)
            System.out.println ("NOT CONVERGED:\n"+newMsg+"\n.......");
            */
          return false;
        }
      }
    }
    return true;
  }

  public void dump ()
  {
    for (Iterator it = rg.edgeIterator (); it.hasNext();) {
      RegionEdge edge = (RegionEdge) it.next ();
      Factor newMsg = newMessages.getMessage (edge.from, edge.to);
      System.out.println ("Message: "+edge.from+" --> "+edge.to+" "+newMsg);
    }
  }

}
9,205
27.590062
131
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/AbstractMessageStrategy.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.inference.gbp;

import java.util.Iterator;

import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.LogTableFactor;
import cc.mallet.grmm.types.TableFactor;

/**
 * Base class for message strategies.  Holds the old/new message arrays and
 * provides the shared message-product computation used by subclasses.
 *
 * Created: May 29, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: AbstractMessageStrategy.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
public abstract class AbstractMessageStrategy implements MessageStrategy {

  protected MessageArray oldMessages;
  protected MessageArray newMessages;

  public void setMessageArray (MessageArray oldMessages, MessageArray newMessages)
  {
    this.oldMessages = oldMessages;
    this.newMessages = newMessages;
  }

  public MessageArray getOldMessages ()
  {
    return oldMessages;
  }

  public MessageArray getNewMessages ()
  {
    return newMessages;
  }

  /**
   * Computes the product of incoming messages relevant to the given edge:
   * multiplies in the old messages from each neighboring parent, then divides
   * out the new messages along the edge's looping-message edges.
   */
  Factor msgProduct (RegionEdge edge)
  {
    Factor result = new LogTableFactor (edge.from.vars);

    // Numerator: old messages from the neighboring parents.
    Iterator parentIt = edge.neighboringParents.iterator ();
    while (parentIt.hasNext ()) {
      RegionEdge incoming = (RegionEdge) parentIt.next ();
      result.multiplyBy (oldMessages.getMessage (incoming.from, incoming.to));
    }

    // Denominator: new messages along the looping edges.
    Iterator loopIt = edge.loopingMessages.iterator ();
    while (loopIt.hasNext ()) {
      RegionEdge looping = (RegionEdge) loopIt.next ();
      result.divideBy (newMessages.getMessage (looping.from, looping.to));
    }

    return result;
  }

}
1,901
29.190476
86
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/ClusterVariationalRegionGenerator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.inference.gbp;

import java.util.*;
import java.util.logging.Logger;

import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.FactorGraph;
import cc.mallet.grmm.types.UndirectedGrid;
import cc.mallet.grmm.types.Variable;
import cc.mallet.util.CollectionUtils;
import cc.mallet.util.MalletLogger;

/**
 * Builds a region graph by the cluster variational method: start from a set
 * of base regions, then repeatedly add regions for the pairwise overlaps of
 * the current level, connecting each region to the overlaps it subsumes,
 * until no overlaps remain.
 *
 * Created: Jun 1, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: ClusterVariationalRegionGenerator.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
public class ClusterVariationalRegionGenerator implements RegionGraphGenerator {

  private static final Logger logger = MalletLogger.getLogger (ClusterVariationalRegionGenerator.class.getName());
  private static final boolean debug = false;

  // Pluggable choice of the top-level ("base") regions.
  private BaseRegionComputer regionComputer;

  public ClusterVariationalRegionGenerator ()
  {
    this (new ByFactorRegionComputer ());
  }

  public ClusterVariationalRegionGenerator (BaseRegionComputer regionComputer)
  {
    this.regionComputer = regionComputer;
  }

  public RegionGraph constructRegionGraph (FactorGraph mdl)
  {
    RegionGraph rg = new RegionGraph ();
    int depth = 0;

    List baseRegions = regionComputer.computeBaseRegions (mdl);
    List theseRegions = baseRegions;

    // Each pass adds one level of overlap regions below the current level.
    while (!theseRegions.isEmpty ()) {
      if (debug) System.out.println ("Depth 0 regions:\n"+CollectionUtils.dumpToString (theseRegions, "\n   "));
      List overlaps = computeOverlaps (theseRegions);
      addEdgesForOverlaps (rg, theseRegions, overlaps);
      theseRegions = overlaps;
      depth++;
    }

    rg.computeInferenceCaches ();

    logger.info ("ClusterVariationalRegionGenerator: Number of regions "+rg.size()+" Number of edges:"+rg.numEdges());

    return rg;
  }

  /**
   * Returns regions for the pairwise variable-set intersections of the given
   * regions, dropping any overlap subsumed by another overlap.
   */
  private List computeOverlaps (List regions)
  {
    List overlaps = new ArrayList ();

    for (Iterator it1 = regions.iterator (); it1.hasNext ();) {
      Region r1 = (Region) it1.next ();
      for (Iterator it2 = regions.iterator (); it2.hasNext ();) {
        Region r2 = (Region) it2.next ();
        if (r1 != r2) {
          Collection intersection = CollectionUtils.intersection (r1.vars, r2.vars);
          if (!intersection.isEmpty () && !anySubsumes (overlaps, intersection)) {
            Collection ptlSet = CollectionUtils.intersection (r1.factors, r2.factors);
            Variable[] vars = (Variable[]) intersection.toArray (new Variable[intersection.size ()]);
            Factor[] ptls = (Factor[]) ptlSet.toArray (new Factor [ptlSet.size ()]);
            Region r = new Region (vars, ptls);
            overlaps.add (r);
          }
        }
      }
    }

    // We can still have subsumed regions in the list if the smaller region was added first.
    for (ListIterator it = overlaps.listIterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      List otherRegions = overlaps.subList (it.nextIndex (), overlaps.size ());
      if (anySubsumes (otherRegions, region.vars)) {
        it.remove ();
      }
    }

    return overlaps;
  }

  /** Returns true if any region in regions contains all the variables in vars.
   */
  private boolean anySubsumes (List regions, Collection vars)
  {
    for (Iterator it = regions.iterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      if (region.vars.containsAll (vars)) return true;
    }
    return false;
  }

  // Connects each region in fromList to every region in toList it subsumes.
  private void addEdgesForOverlaps (RegionGraph rg, List fromList, List toList)
  {
    for (Iterator fromIt = fromList.iterator (); fromIt.hasNext ();) {
      Region from = (Region) fromIt.next ();
      for (Iterator toIt = toList.iterator (); toIt.hasNext ();) {
        Region to = (Region) toIt.next ();
        if (from.vars.containsAll (to.vars)) {
          rg.add (from, to);
        }
      }
    }
  }

  // computing base regions

  /** Removes from the list every region whose variables are a subset of another's. */
  public static void removeSubsumedRegions (List regions)
  {
    for (ListIterator it = regions.listIterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      for (Iterator it2 = regions.iterator (); it2.hasNext();) {
        Region r2 = (Region) it2.next ();
        if (r2 != region && r2.vars.size() >= region.vars.size ()) {
          if (r2.vars.containsAll (region.vars)) {
            it.remove ();
            break;
          }
        }
      }
    }
  }

  /** Adds to each region every model factor whose variables it fully contains. */
  public static void addAllFactors (FactorGraph mdl, List regions)
  {
    for (Iterator it = regions.iterator (); it.hasNext ();) {
      Region region = (Region) it.next ();
      for (Iterator pIt = mdl.factorsIterator (); pIt.hasNext();) {
        Factor ptl = (Factor) pIt.next ();
        if (region.vars.containsAll (ptl.varSet ())) {
          region.factors.add (ptl);
        }
      }
    }
  }

  public static interface BaseRegionComputer {
    /**
     * Returns a list of top-level regions for use in the cluster variational method.
     * @param mdl An undirected model.
     * @return A list of regions.  No region in the list may subsume another.
     */
    List computeBaseRegions (FactorGraph mdl);
  }

  /**
   * Region computer where each top-level region consists of a single factor node.
   * If the model is pairwise, this is equivalent to using the Bethe free energy.
   */
  public static class ByFactorRegionComputer implements BaseRegionComputer {

    public List computeBaseRegions (FactorGraph mdl)
    {
      List regions = new ArrayList (mdl.factors ().size ());
      for (Iterator it = mdl.factorsIterator (); it.hasNext ();) {
        Factor ptl = (Factor) it.next ();
        regions.add (new Region (ptl));
      }
      removeSubsumedRegions (regions);
      addAllFactors (mdl, regions);
      return regions;
    }

  }

  /** Base regions are the 2x2 squares of an {@link UndirectedGrid}. */
  public static class Grid2x2RegionComputer implements BaseRegionComputer {

    public List computeBaseRegions (FactorGraph mdl)
    {
      List regions = new ArrayList ();
      UndirectedGrid grid = (UndirectedGrid) mdl;
      for (int x = 0; x < grid.getWidth() - 1; x++) {
        for (int y = 0; y < grid.getHeight() - 1; y++) {
          Variable[] vars = new Variable[] {
            grid.get (x, y),
            grid.get (x, y+1),
            grid.get (x+1, y+1),
            grid.get (x+1, y),
          };
          regions.add (new Region (vars, new Factor[0]));
        }
      }
      addAllFactors (mdl, regions);
      return regions;
    }

  }

}
6,797
31.066038
118
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/inference/gbp/FullMessageStrategy.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.inference.gbp;

import java.util.Iterator;

import cc.mallet.grmm.types.*;

/**
 * A first implementation of MessageStrategy that assumes that a BP region graph
 *  is being used.
 *
 * Created: May 29, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: FullMessageStrategy.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
 */
public class FullMessageStrategy extends AbstractMessageStrategy {

  private static final boolean debug = false;
  private static final boolean debugLite = false;

  public FullMessageStrategy ()
  {
  }

  /**
   * Standard parent-to-child update: multiply the incoming-message product by
   * the edge's factors, marginalize onto the child's variables, normalize,
   * and store in the new-message array.
   */
  public void sendMessage (RegionEdge edge)
  {
    if (debugLite) {
      System.err.println ("Sending message "+edge);
    }

    Factor product = msgProduct (edge);
    Region from = edge.from;
    Region to = edge.to;

    if (debug) System.err.println ("Message "+from+" --> "+to+" after msgProduct: "+product);

    for (Iterator it = edge.factorsToSend.iterator (); it.hasNext ();) {
      Factor ptl = (Factor) it.next ();
      product.multiplyBy (ptl);
    }

    // NOTE(review): assumes marginalize() yields a TableFactor here; would
    // throw ClassCastException for a factor type where it does not -- confirm.
    TableFactor result = (TableFactor) product.marginalize (to.vars);
    result.normalize ();

    if (debug) {
      System.err.println ("Final message "+edge+":"+result);
    }

    newMessages.setMessage (from, to, result);
  }

  /*
  static void multiplyEdgeFactors (RegionEdge edge, DiscretePotential product)
  {
    for (Iterator it = edge.factorsToSend.iterator (); it.hasNext ();) {
      DiscretePotential ptl = (DiscretePotential) it.next ();
      if (debug) System.err.println ("Message "+edge+" multiplying by: "+ptl);
      product.multiplyBy (ptl);
    }
  }
  */

  // debugging function: would dividing product by otherMsg produce NaN?
  private boolean willBeNaN (Factor product, Factor otherMsg)
  {
    Factor p2 = product.duplicate ();
    p2.divideBy (otherMsg);
    return p2.isNaN ();
  }

  // debugging function: would multiplying product by otherMsg produce NaN?
  private boolean willBeNaN2 (Factor product, Factor otherMsg)
  {
    Factor p2 = product.duplicate ();
    p2.multiplyBy (otherMsg);
    return p2.isNaN ();
  }

  /** Returns element-wise weighted averages of the two message arrays. */
  public MessageArray averageMessages (RegionGraph rg, MessageArray a1, MessageArray a2, double inertiaWeight)
  {
    MessageArray arr = new MessageArray (rg);
    for (Iterator it = rg.edgeIterator (); it.hasNext ();) {
      RegionEdge edge = (RegionEdge) it.next ();
      DiscreteFactor msg1 = a1.getMessage (edge.from, edge.to);
      DiscreteFactor msg2 = a2.getMessage (edge.from, edge.to);
      if (msg1 != null) {
        TableFactor averaged = (TableFactor) Factors.average (msg1, msg2, inertiaWeight);
        arr.setMessage (edge.from, edge.to, averaged);
      }
    }
    return arr;
  }

}
3,069
27.962264
110
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/PerFieldF1Evaluator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.extract;

import java.io.PrintStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.text.DecimalFormat;
import java.util.Iterator;

import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.MatrixOps;

/**
 * Evaluates an {@link Extraction} by computing per-field precision, recall,
 * and F1, plus micro- and macro-averaged overall F1, and printing the results
 * as tab-separated tables.  Field values are matched with a pluggable
 * {@link FieldComparator} (exact match by default).
 *
 * Created: Oct 8, 2004
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: PerFieldF1Evaluator.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public class PerFieldF1Evaluator implements ExtractionEvaluator {

  // Comparator used to decide whether a predicted field value matches a true one.
  private FieldComparator comparator = new ExactMatchComparator ();
  // If non-null, every extraction error is described on this stream.
  private PrintStream errorOutputStream = null;

  public FieldComparator getComparator ()
  {
    return comparator;
  }

  public void setComparator (FieldComparator comparator)
  {
    this.comparator = comparator;
  }

  public PrintStream getErrorOutputStream ()
  {
    return errorOutputStream;
  }

  public void setErrorOutputStream (OutputStream errorOutputStream)
  {
    this.errorOutputStream = new PrintStream (errorOutputStream);
  }

  /** Evaluates the extraction with no description prefix, printing to System.out. */
  public void evaluate (Extraction extraction)
  {
    evaluate ("", extraction, System.out);
  }

  // Assumes that there are as many records as documents, indexed by docs.
  // Assumes that extractor returns at most one value
  /**
   * Computes and prints segment counts, per-field P/R/F1, and overall
   * micro/macro-averaged scores for the given extraction.
   *
   * @param description prefix printed before each table heading
   * @param extraction  the extraction to score (one record per document)
   * @param out         stream the report is printed to
   */
  public void evaluate (String description, Extraction extraction, PrintStream out)
  {
    int numDocs = extraction.getNumDocuments ();
    assert numDocs == extraction.getNumRecords ();

    LabelAlphabet dict = extraction.getLabelAlphabet();
    int numLabels = dict.size();
    // Per-label counters, indexed by the label's alphabet index.
    int[] numCorr = new int [numLabels];   // predicted values that match a true value
    int[] numPred = new int [numLabels];   // all predicted values
    int[] numTrue = new int [numLabels];   // all true values

    for (int docnum = 0; docnum < numDocs; docnum++) {
      Record extracted = extraction.getRecord (docnum);
      Record target = extraction.getTargetRecord (docnum);

      // Calc precision
      Iterator it = extracted.fieldsIterator ();
      while (it.hasNext ()) {
        Field predField = (Field) it.next ();
        Label name = predField.getName ();
        Field trueField = target.getField (name);
        int idx = name.getIndex ();

        // Each predicted filler counts once; correct only if the true field
        // contains a matching value under the configured comparator.
        for (int j = 0; j < predField.numValues(); j++) {
          numPred [idx]++;
          if (trueField != null && trueField.isValue (predField.value (j), comparator)) {
            numCorr [idx]++;
          } else {
            // We have an error, report if necessary (this should be moved to the per-field rather than per-filler level.)
            if (errorOutputStream != null) {
              //xxx TODO: Display name of supporting document
              errorOutputStream.println ("Error in extraction!");
              errorOutputStream.println ("Predicted "+predField);
              errorOutputStream.println ("True "+trueField);
              errorOutputStream.println ();
            }
          }
        }
      }

      // Calc true
      it = target.fieldsIterator ();
      while (it.hasNext ()) {
        Field trueField = (Field) it.next ();
        Label name = trueField.getName ();
        numTrue [name.getIndex ()] += trueField.numValues ();
      }
    }

    out.println (description+" SEGMENT counts");
    out.println ("Name\tCorrect\tPred\tTarget");
    for (int i = 0; i < numLabels; i++) {
      Label name = dict.lookupLabel (i);
      out.println (name+"\t"+numCorr[i]+"\t"+numPred[i]+"\t"+numTrue[i]);
    }
    out.println ();

    DecimalFormat f = new DecimalFormat ("0.####");
    double totalF1 = 0;
    int totalFields = 0;
    out.println (description+" per-field F1");
    out.println ("Name\tP\tR\tF1");
    for (int i = 0; i < numLabels; i++) {
      // Convention: empty predictions give P=0, empty truth gives R=1.
      double P = (numPred[i] == 0) ? 0 : ((double)numCorr[i]) / numPred [i];
      double R = (numTrue[i] == 0) ? 1 : ((double)numCorr[i]) / numTrue [i];
      double F1 = (P + R == 0) ? 0 : (2 * P * R) / (P + R);
      // Macro average covers only labels that actually occur (predicted or true).
      if ((numPred[i] > 0) || (numTrue[i] > 0)) {
        totalF1 += F1;
        totalFields++;
      }
      Label name = dict.lookupLabel (i);
      out.println (name+"\t"+f.format(P)+"\t"+f.format(R)+"\t"+f.format(F1));
    }

    // NOTE(review): if totalPred, totalTrue, or totalFields is 0, the overall
    // figures below divide by zero and print NaN — presumably never hit in
    // practice; confirm against callers.
    int totalCorr = MatrixOps.sum (numCorr);
    int totalPred = MatrixOps.sum (numPred);
    int totalTrue = MatrixOps.sum (numTrue);
    double P = ((double)totalCorr) / totalPred;
    double R = ((double)totalCorr) / totalTrue;
    double F1 = (2 * P * R) / (P + R);
    out.println ("OVERALL (micro-averaged) P="+f.format(P)+" R="+f.format(R)+" F1="+f.format(F1));
    out.println ("OVERALL (macro-averaged) F1="+f.format(totalF1/totalFields));
    out.println();
  }

}
4,970
32.587838
122
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/Extractor.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */

package cc.mallet.extract;

import java.io.Serializable;
import java.util.Iterator;

import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.iterator.PipeInputIterator;
import cc.mallet.types.Alphabet;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.Instance;

// Analogous to base.classify.Classifier

/**
 * Generic interface for objects that do information extraction.
 *  Typically, this will mean extraction of database records
 *  (see @link{Record}) from Strings, but this interface is not
 *  specific to this case.
 */
//TODO: Possibly in the future, create Document and Corpus objects.
// (This would allow calling an extractor on multiple documents in a type-safe manner.
public interface Extractor extends Serializable
{
  /**
   * Performs extraction given a raw object.  The object will
   *  be passed through the Extractor's pipe.
   * @param o The document to extract from (often a String).
   * @return Extraction the results of performing extraction
   */
  public Extraction extract (Object o);

  /**
   * Performs extraction from an object that has been
   *  already been tokenized.  This method will pass spans
   *  through the extractor's pipe.
   * @param toks A tokenized document
   * @return Extraction the results of performing extraction
   */
  public Extraction extract (Tokenization toks);

  /**
   * Performs extraction on a set of raw documents.  The
   *  Instances output from source will be passed through
   *  both the tokenization pipe and the feature extraction
   *  pipe.
   * @param source A source of raw documents
   * @return Extraction the results of performing extraction
   */
  public Extraction extract (Iterator<Instance> source);

  /**
   * Returns the feature pipe used by this extractor.  The pipe
   *  takes an Instance and converts it into a form usable
   *  by the particular extraction algorithm.  This pipe expects
   *  the Instance's data field to be a Tokenization.  For example,
   *  pipes often perform feature extraction.  The type of
   *  raw object expected by the pipe depends on the particular
   *  subclass of extractor.
   * @return a pipe
   */
  public Pipe getFeaturePipe ();

  /**
   * Returns the pipe used by this extractor to tokenize the input.
   *  The type of Instance this pipe expects is specific to the
   *  individual extractor.  This pipe will return an Instance whose
   *  data is a Tokenization.
   * @return a pipe
   */
  public Pipe getTokenizationPipe ();

  /**
   * Sets the pipe used by this extractor for tokenization.  The pipe should
   *  take a raw object and convert it into a Tokenization.
   * <P>
   * The pipe @link{edu.umass.cs.mallet.base.pipe.CharSequence2TokenSequence} is an
   *  example of a pipe that could be used here.
   */
  public void setTokenizationPipe (Pipe pipe);

  /**
   * Returns an alphabet of the features used by the extractor.
   *  The alphabet maps strings describing the features to indices.
   * @return the input alphabet
   */
  public Alphabet getInputAlphabet ();

  /**
   * Returns an alphabet of the labels used by the extractor.
   *  Labels include entity types (such as PERSON) and slot
   *  names (such as EMPLOYEE-OF).
   * @return the target alphabet
   */
  public LabelAlphabet getTargetAlphabet ();
}
3,809
32.716814
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/BIOTokenizationFilterWithTokenIndices.java
package cc.mallet.extract; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; public class BIOTokenizationFilterWithTokenIndices extends BIOTokenizationFilter { protected Span createSpan(Tokenization input, int startTokenIdx, int endTokenIdx) { StringSpan span = (StringSpan) input .subspan(startTokenIdx, endTokenIdx); span.setProperty("StartTokenIdx", new Integer(startTokenIdx)); span.setProperty("EndTokenIdx", new Integer(endTokenIdx-1)); return span; } // Serialization garbage private static final long serialVersionUID = 1L; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject(ObjectOutputStream out) throws IOException { out.defaultWriteObject(); out.writeInt(CURRENT_SERIAL_VERSION); } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); in.readInt(); // read version } }
963
25.054054
70
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/LabeledSpans.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import cc.mallet.types.ArrayListSequence; import cc.mallet.types.Label; /** * Created: Oct 31, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: LabeledSpans.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class LabeledSpans extends ArrayListSequence { private Object document; public LabeledSpans (Object document) { this.document = document; } public Object getDocument () { return document; } public Label getLabel (int i) { LabeledSpan span = (LabeledSpan) get (i); return span.getLabel (); } public Span getSpan (int i) { return (Span) get (i); } public LabeledSpan getLabeledSpan (int i) { return (LabeledSpan) get (i); } // Serialization garbage private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject (ObjectOutputStream out) throws IOException { out.defaultWriteObject (); out.writeInt (CURRENT_SERIAL_VERSION); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject (); int version = in.readInt (); } }
1,752
23.013699
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/StringTokenization.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */

package cc.mallet.extract;

import java.io.ObjectOutputStream;
import java.io.ObjectInputStream;
import java.io.IOException;

import cc.mallet.types.TokenSequence;
import cc.mallet.util.CharSequenceLexer;

/**
 * A {@link Tokenization} of a character sequence, stored as a
 * {@link TokenSequence} of {@link StringSpan}s that index into the
 * original document.
 */
public class StringTokenization extends TokenSequence implements Tokenization {

  // The underlying document all spans point into.
  private CharSequence document;

  /** Create an empty StringTokenization */
  public StringTokenization (CharSequence seq)
  {
    document = seq;
  }

  /**
   *  Creates a tokenization of the given string.  Tokens are
   *  added from all the matches of the given lexer.
   */
  public StringTokenization (CharSequence string, CharSequenceLexer lexer)
  {
    super();
    this.document = string;

    lexer.setCharSequence (string);
    while (lexer.hasNext()) {
      // next() advances the lexer; the matched character range becomes one span.
      lexer.next ();
      this.add (new StringSpan (string, lexer.getStartOffset(), lexer.getEndOffset()));
    }
  }

  //xxx Refactor into AbstractTokenization
  /**
   * Returns a single span covering tokens [firstToken, lastToken).
   * If lastToken exceeds the number of tokens, the span is clamped to
   * run through the end of the document.
   */
  public Span subspan (int firstToken, int lastToken)
  {
    StringSpan firstSpan = (StringSpan) get(firstToken);
    int startIdx = firstSpan.getStartIdx ();

    int endIdx;
    if (lastToken > size()) {
      endIdx = document.length ();
    } else {
      // lastToken is exclusive, so the last included token is lastToken - 1.
      StringSpan lastSpan = (StringSpan) get(lastToken - 1);
      endIdx = lastSpan.getEndIdx ();
    }

    return new StringSpan (document, startIdx, endIdx);
  }

  public Span getSpan (int i)
  {
    return (Span) get(i);
  }

  public Object getDocument ()
  {
    return document;
  }

  // Serialization garbage
  private static final long serialVersionUID = 1;
  private static final int CURRENT_SERIAL_VERSION = 1;

  private void writeObject (ObjectOutputStream out) throws IOException
  {
    out.defaultWriteObject ();
    out.writeInt (CURRENT_SERIAL_VERSION);
  }

  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
  {
    in.defaultReadObject ();
    int version = in.readInt ();
  }
}
2,434
24.904255
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/ConfidenceTokenizationFilter.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.extract;

import java.io.Serializable;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;

import cc.mallet.fst.confidence.*;
import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.Sequence;

/**
 * A {@link TokenizationFilter} that builds labeled spans via a
 * {@link DocumentExtraction} and annotates them with confidence scores
 * estimated by an {@link ExtractionConfidenceEstimator}.
 *
 * Created: Oct 26, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 */
public class ConfidenceTokenizationFilter implements TokenizationFilter, Serializable {

  // Estimates per-span confidence on the extraction before spans are returned.
  ExtractionConfidenceEstimator confidenceEstimator;
  // NOTE(review): underlyingFilter is stored and serialized but never consulted
  // in constructLabeledSpans — confirm whether it is intended to be applied.
  TokenizationFilter underlyingFilter;

  public ConfidenceTokenizationFilter (ExtractionConfidenceEstimator confidenceEstimator,
                                       TokenizationFilter underlyingFilter) {
    super();
    this.confidenceEstimator = confidenceEstimator;
    this.underlyingFilter = underlyingFilter;
  }

  /**
   * Builds labeled spans for the tagged sequence, runs the confidence
   * estimator over the resulting extraction, and returns the (now
   * confidence-annotated) spans.
   */
  public LabeledSpans constructLabeledSpans (LabelAlphabet dict, Object document, Label backgroundTag,
                                             Tokenization input, Sequence seq)
  {
    DocumentExtraction extraction = new DocumentExtraction("Extraction",
                                                           dict,
                                                           input,
                                                           seq,
                                                           null,
                                                           backgroundTag.toString());
    confidenceEstimator.estimateConfidence(extraction);
    return extraction.getExtractedSpans();
  }


  // Serialization garbage
  private static final long serialVersionUID = 1;
  private static final int CURRENT_SERIAL_VERSION = 1;

  // Custom form: fields are written explicitly (no defaultWriteObject), mirrored by readObject.
  private void writeObject (ObjectOutputStream out) throws IOException
  {
    out.writeInt (CURRENT_SERIAL_VERSION);
    out.writeObject(confidenceEstimator);
    out.writeObject(underlyingFilter);
  }


  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
  {
    in.readInt (); // read version
    this.confidenceEstimator = (ExtractionConfidenceEstimator) in.readObject();
    this.underlyingFilter = (TokenizationFilter) in.readObject();
  }
}
2,623
35.957746
102
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/AccuracyCoverageEvaluator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.extract;

import java.io.PrintStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.text.DecimalFormat;
import java.util.Iterator;
import java.util.Vector;

import cc.mallet.fst.confidence.ConfidenceEvaluator;
import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.MatrixOps;

/**
 * Constructs Accuracy-coverage graph using confidence values to sort Fields.
 *
 * Created: Nov 8, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 */
public class AccuracyCoverageEvaluator implements ExtractionEvaluator {

  // Number of bins used by the ConfidenceEvaluator histogram.
  private int numberBins;

  private FieldComparator comparator = new ExactMatchComparator ();
  private PrintStream errorOutputStream = null;

  /**
   * @param numberBins number of bins for the accuracy-coverage curve
   */
  public AccuracyCoverageEvaluator (int numberBins) {
    // FIX: the parameter was previously ignored and numberBins hardcoded to 20.
    this.numberBins = numberBins;
  }

  public FieldComparator getComparator ()
  {
    return comparator;
  }

  public void setComparator (FieldComparator comparator)
  {
    this.comparator = comparator;
  }

  public PrintStream getErrorOutputStream ()
  {
    return errorOutputStream;
  }

  public void setErrorOutputStream (OutputStream errorOutputStream)
  {
    this.errorOutputStream = new PrintStream (errorOutputStream);
  }

  /** Evaluates with no description prefix, printing to System.out. */
  public void evaluate (Extraction extraction)
  {
    evaluate ("", extraction, System.out);
  }

  // Assumes that there are as many records as documents, indexed by docs.
  // Assumes that extractor returns at most one value
  /**
   * Collects (confidence, correctness) pairs for every predicted field value,
   * then prints correlation, average precision, the accuracy/coverage table,
   * an ASCII bar chart of accuracy values, raw counts, and accuracy/recall.
   *
   * @param description prefix for the report (currently unused in output)
   * @param extraction  the extraction to score
   * @param out         stream the report is printed to
   */
  public void evaluate (String description, Extraction extraction, PrintStream out)
  {
    int numDocs = extraction.getNumDocuments ();
    assert numDocs == extraction.getNumRecords ();

    Vector entityConfidences = new Vector();
    int numTrueValues = 0;
    int numPredValues = 0;
    int numCorrValues = 0;

    for (int docnum = 0; docnum < numDocs; docnum++) {
      Record extracted = extraction.getRecord (docnum);
      Record target = extraction.getTargetRecord (docnum);

      Iterator it = extracted.fieldsIterator ();
      while (it.hasNext ()) {
        Field predField = (Field) it.next ();
        Field trueField = target.getField (predField.getName());
        // (The iterator never yields null, so predField needs no null check.)
        numPredValues += predField.numValues();
        for (int j = 0; j < predField.numValues(); j++) {
          LabeledSpan span = predField.span(j);
          boolean correct = (trueField != null && trueField.isValue (predField.value (j), comparator));
          entityConfidences.add(new ConfidenceEvaluator.EntityConfidence (span.getConfidence(), correct, span.getText()));
          if (correct) numCorrValues++;
        }
      }

      it = target.fieldsIterator ();
      while (it.hasNext ()) {
        Field trueField = (Field) it.next ();
        numTrueValues += trueField.numValues ();
      }
    }

    ConfidenceEvaluator evaluator = new ConfidenceEvaluator(entityConfidences, this.numberBins);
    out.println("correlation: " + evaluator.correlation());
    out.println("avg precision: " + evaluator.getAveragePrecision());
    out.println("coverage\taccuracy:\n" + evaluator.accuracyCoverageValuesToString());

    // ASCII bar chart: one row per coverage bin, scaled to at most 25 stars.
    double[] ac = evaluator.getAccuracyCoverageValues();
    for (int i=0; i < ac.length; i++) {
      int marks = (int)(ac[i]*25.0);
      for (int j=0; j < marks; j++) out.print("*");
      out.println();
    }

    out.println("nTrue:" + numTrueValues + " nCorr:" + numCorrValues + " nPred:" + numPredValues + "\n");
    out.println("recall\taccuracy:\n" + evaluator.accuracyRecallValuesToString(numTrueValues));
  }
}
4,022
32.525
105
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/BIOTokenizationFilter.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.extract;

import java.io.Serializable;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;

import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.Sequence;

/**
 * Converts a sequence of B-/I-/O-style tags over a tokenization into
 * {@link LabeledSpans}: maximal runs of matching tags become one span,
 * the "B-"/"I-" prefix is stripped from the span's label, and gaps
 * between spans are filled with background spans.
 *
 * Created: Nov 12, 2004
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: BIOTokenizationFilter.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public class BIOTokenizationFilter implements TokenizationFilter, Serializable {

  public LabeledSpans constructLabeledSpans (LabelAlphabet dict, Object document, Label backgroundTag,
                                             Tokenization input, Sequence seq)
  {
    LabeledSpans labeled = new LabeledSpans (document);
    addSpansFromTags (labeled, input, seq, dict, backgroundTag);
    return labeled;
  }

  /**
   * Walks the tag sequence, grouping each run of compatible tags
   * (terminated by a "B-" tag or a non-matching tag) into one labeled span.
   */
  private void addSpansFromTags (LabeledSpans labeled, Tokenization input, Sequence tags, LabelAlphabet dict,
                                 Label backgroundTag)
  {
    int i = 0;
    int docidx = 0;   // character index in the document covered so far
    while (i < tags.size ()) {
      Label thisTag = dict.lookupLabel (tags.get (i).toString ());
      int startTokenIdx = i;
      // Extend the run until a begin tag or a tag of a different type.
      while (++i < tags.size ()) {
        Label nextTag = dict.lookupLabel (tags.get (i).toString ());
        if (isBeginTag (nextTag) || !tagsMatch (thisTag, nextTag)) break;
      }
      int endTokenIdx = i;   // exclusive
      Span span = createSpan (input, startTokenIdx, endTokenIdx);
      // Fill any uncovered characters before this span with a background span.
      addBackgroundIfNecessary (labeled, (StringSpan) span, docidx, backgroundTag);
      docidx = ((StringSpan) span).getEndIdx ();

      // Strip the "B-"/"I-" prefix so the span carries the bare entity label.
      if (isBeginTag (thisTag) || isInsideTag (thisTag)) {
        thisTag = trimTag (dict, thisTag);
      }
      labeled.add (new LabeledSpan (span, thisTag, thisTag == backgroundTag));
    }
  }

  /** Hook for subclasses to customize span creation; endTokenIdx is exclusive. */
  protected Span createSpan (Tokenization input, int startTokenIdx, int endTokenIdx)
  {
    return input.subspan (startTokenIdx, endTokenIdx);
  }

  /** Returns the label with its two-character ("B-"/"I-") prefix removed. */
  private Label trimTag (LabelAlphabet dict, Label tag)
  {
    String name = (String) tag.getEntry ();
    return dict.lookupLabel (name.substring (2));
  }

  /** True if the two tags denote the same entity type, ignoring B-/I- prefixes. */
  private boolean tagsMatch (Label tag1, Label tag2)
  {
    String name1 = (String) tag1.getEntry ();
    String name2 = (String) tag2.getEntry ();

    if (isBeginTag (tag1) || isInsideTag (tag1)) {
      name1 = name1.substring (2);
    }

    if (isInsideTag (tag2)) {
      name2 = name2.substring (2);
    }

    return name1.equals (name2);
  }

  private boolean isBeginTag (Label lbl)
  {
    String name = (String) lbl.getEntry ();
    return name.startsWith ("B-");
  }

  private boolean isInsideTag (Label lbl)
  {
    String name = (String) lbl.getEntry ();
    return name.startsWith ("I-");
  }

  /** Adds a background span covering characters [docidx, span start) if that gap is non-empty. */
  private void addBackgroundIfNecessary (LabeledSpans labeled, StringSpan span, int docidx, Label background)
  {
    int nextIdx = span.getStartIdx ();
    if (docidx < nextIdx) {
      Span newSpan = new StringSpan ((CharSequence) span.getDocument (), docidx, nextIdx);
      labeled.add (new LabeledSpan (newSpan, background, true));
    }
  }

  // Serialization garbage
  private static final long serialVersionUID = -8726127297313150023L;
  private static final int CURRENT_SERIAL_VERSION = 1;

  private void writeObject (ObjectOutputStream out) throws IOException
  {
    out.defaultWriteObject ();
    out.writeInt (CURRENT_SERIAL_VERSION);
  }

  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
  {
    in.defaultReadObject ();
    in.readInt (); // read version
  }
}
3,977
29.837209
109
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/PunctuationIgnoringComparator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; import java.util.regex.Pattern; import java.util.regex.Matcher; /** * Created: Nov 23, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: PunctuationIgnoringComparator.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class PunctuationIgnoringComparator implements FieldComparator { private Pattern punctuationPattern = Pattern.compile ("\\p{Punct}*$"); public void setPunctuationPattern (Pattern punctuationPattern) { this.punctuationPattern = punctuationPattern; } public boolean matches (String fieldVal1, String fieldVal2) { String trim1 = doTrim (fieldVal1); String trim2 = doTrim (fieldVal2); return trim1.equals (trim2); } private String doTrim (String str) { return punctuationPattern.matcher (str).replaceAll (""); } }
1,276
30.925
92
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/LatticeViewer.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; import java.io.*; import java.text.DecimalFormat; import java.util.List; import cc.mallet.fst.CRF; import cc.mallet.fst.MaxLattice; import cc.mallet.fst.MaxLatticeDefault; import cc.mallet.fst.SumLatticeDefault; import cc.mallet.fst.Transducer; import cc.mallet.types.*; /** * Created: Oct 31, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: LatticeViewer.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class LatticeViewer { private static final int FEATURE_CUTOFF_PCT = 25; private static final int LENGTH = 10; static void lattice2html (PrintStream out, ExtorInfo info) { PrintWriter writer = new PrintWriter (new OutputStreamWriter (out), true); lattice2html (writer, info); } // if lattice == null, no alpha, beta values printed static void lattice2html (PrintWriter out, ExtorInfo info) { assert (info.target.size() == info.predicted.size()); assert (info.input.size() == info.predicted.size()); int N = info.target.size(); for (int start = 0; start < N; start += LENGTH - 1) { int end = Math.min (N, start + LENGTH); if (!allSeqMatches (info.predicted, info.target, start, end)) { error2html (out, info, start, end); } } } private static void writeHeader (PrintWriter out) { out.println ("<html><head><title>ERROR OUTPUT</title>\n<link rel=\"stylesheet\" href=\"errors.css\" type=\"text/css\" />\n</head><body>"); } private static void writeFooter (PrintWriter out) { out.println ("</body></html>"); } // Display HTML for one error private static void error2html (PrintWriter out, ExtorInfo info, int start, int 
end) { String anchor = info.idx+":"+start+":"+end; out.println ("<p><A NAME=\""+anchor+"\">"); out.println ("<p>Instance "+info.desc+" Position "+start+"..."+end); if (info.link != null) { out.println ("<a href=\""+info.link+"#"+anchor+"\">[Lattice]</a>"); } out.println ("</p>"); out.println ("<table>"); outputIndices (out, start, end); outputInputRow (out, info.input, start, end); outputTableRow (out, "target", info.target, info.predicted, start, end); outputTableRow (out, "predicted", info.predicted, info.target, start, end); if (info.lattice != null) { outputLatticeRows (out, info.lattice, start, end); outputTransitionCosts (out, info, start, end); outputFeatures (out, info.fvs, info.predicted, info.target, start, end); } out.println ("</table>"); } public static int numMaxViterbi = 5; private static void outputLatticeRows (PrintWriter out, MaxLattice lattice, int start, int end) { DecimalFormat f = new DecimalFormat ("0.##"); Transducer ducer = lattice.getTransducer (); int max = Math.min (numMaxViterbi, ducer.numStates()); List<Sequence<Transducer.State>> stateSequences = lattice.bestStateSequences(max); for (int k = 0; k < max; k++) { out.println (" <tr class=\"delta\">"); out.println (" <td class=\"label\">&delta; rank "+k+"</td>"); for (int ip = start; ip < end; ip++) { Transducer.State state = stateSequences.get(k).get(ip+1); if (state.getName().equals (lattice.bestOutputSequence().get(ip))) { out.print ("<td class=\"viterbi\">"); } else { out.print ("<td>"); } out.print (state.getName()+"<br />"+f.format (-lattice.getDelta (ip+1, state.getIndex ()))+"</td>"); } out.println ("</tr>"); } } private static int numFeaturesToDisplay = 5; public static int getNumFeaturesToDisplay () { return numFeaturesToDisplay; } public static void setNumFeaturesToDisplay (int numFeaturesToDisplay) { LatticeViewer.numFeaturesToDisplay = numFeaturesToDisplay; } private static void outputTransitionCosts (PrintWriter out, ExtorInfo info, int start, int end) { Transducer ducer = 
info.lattice.getTransducer (); out.println ("<tr class=\"predtrans\">"); out.println ("<td class=\"label\">Cost(pred. trans)</td>"); for (int ip = start; ip < end; ip++) { if (ip == 0) { out.println ("<td></td>"); continue; } Transducer.State from = ((CRF) ducer).getState (info.bestStates.get (ip - 1).toString ()); Transducer.TransitionIterator iter = from.transitionIterator (info.fvs, ip, info.predicted, ip); if (iter.hasNext ()) { iter.next (); double cost = iter.getWeight(); String str = iter.describeTransition ((int) (Math.abs(cost) / FEATURE_CUTOFF_PCT)); out.print ("<td>" + str + "</td>"); } else { out.print ("<td>No matching transition</td>"); } } out.println ("</tr>"); out.println ("<tr class=\"targettrans\">"); out.println ("<td class=\"label\">Cost(target trans)</td>"); for (int ip = start; ip < end; ip++) { if (ip == 0) { out.println ("<td></td>"); continue; } if (!seqMatches (info.predicted, info.target, ip) || !seqMatches (info.predicted, info.target, ip - 1)) { Transducer.State from = ((CRF) ducer).getState (info.target.get (ip - 1).toString ()); if (from == null) { out.println ("<td colspan='"+(end-start)+"'>Could not find state for "+info.target.get(ip-1)+"</td>"); } else { Transducer.TransitionIterator iter = from.transitionIterator (info.fvs, ip, info.target, ip); if (iter.hasNext ()) { iter.next (); double cost = iter.getWeight(); String str = iter.describeTransition ((int) (Math.abs(cost) / FEATURE_CUTOFF_PCT)); out.print ("<td>" + str + "</td>"); } else { out.print ("<td>No matching transition</td>"); } } } else { out.print ("<td></td>"); } } out.println ("</tr>"); out.println ("<tr class=\"predtargettrans\">"); out.println ("<td class=\"label\">Cost (pred->target trans)</td>"); for (int ip = start; ip < end; ip++) { if (ip == 0) { out.println ("<td></td>"); continue; } if (!seqMatches (info.predicted, info.target, ip) || !seqMatches (info.predicted, info.target, ip - 1)) { Transducer.State from = ((CRF) ducer).getState (info.bestStates.get (ip 
- 1).toString ()); Transducer.TransitionIterator iter = from.transitionIterator (info.fvs, ip, info.target, ip); if (iter.hasNext ()) { iter.next (); double cost = iter.getWeight(); String str = iter.describeTransition ((int) (Math.abs(cost) / FEATURE_CUTOFF_PCT)); out.print ("<td>" + str + "</td>"); } else { out.print ("<td>No matching transition</td>"); } } else { out.print ("<td></td>"); } } out.println ("</tr>"); } private static void outputLatticeRows (PrintWriter out, SumLatticeDefault lattice, int start, int end) { DecimalFormat f = new DecimalFormat ("0.##"); Transducer ducer = lattice.getTransducer (); for (int k = 0; k < ducer.numStates(); k++) { Transducer.State state = ducer.getState (k); out.println (" <tr class=\"alpha\">"); out.println (" <td class=\"label\">&alpha;("+state.getName()+")</td>"); for (int ip = start; ip < end; ip++) { out.print ("<td>"+f.format (lattice.getAlpha (ip+1, state))+"</td>"); } out.println ("</tr>"); } for (int k = 0; k < ducer.numStates(); k++) { Transducer.State state = ducer.getState (k); out.println (" <tr class=\"beta\">"); out.println (" <td class=\"label\">&beta;("+state.getName()+")</td>"); for (int ip = start; ip < end; ip++) { out.print ("<td>"+f.format (lattice.getBeta (ip+1, state))+"</td>"); } out.println ("</tr>"); } for (int k = 0; k < ducer.numStates(); k++) { Transducer.State state = ducer.getState (k); out.println (" <tr class=\"gamma\">"); out.println (" <td class=\"label\">&gamma;("+state.getName()+")</td>"); for (int ip = start; ip < end; ip++) { out.print ("<td>"+f.format (lattice.getGammaWeight(ip+1, state))+"</td>"); } out.println ("</tr>"); } } private static void outputInputRow (PrintWriter out, TokenSequence input, int start, int end) { out.println (" <tr class=\"input\">"); out.println (" <td class=\"label\"></td>"); for (int ip = start; ip < end; ip++) { out.print ("<td>"+input.get(ip).getText()+"</td>"); } out.println (" </tr>"); } private static void outputIndices (PrintWriter out, int start, 
int end) { // (continuation of a method whose signature begins before this fragment)
  out.println (" <tr class=\"indices\">");
  out.println (" <td class=\"label\"></td>");
  // One cell per token position in [start, end).
  for (int ip = start; ip < end; ip++) { out.print ("<td>"+ip+"</td>"); }
  out.println (" </tr>");
}

// Emits one table row (labeled cssClass) holding the elements of seq1 in [start, end);
// a cell gets class "error" wherever seq1 and seq2 disagree (string comparison).
private static void outputTableRow (PrintWriter out, String cssClass, Sequence seq1, Sequence seq2, int start, int end)
{
  out.println (" <tr class=\""+cssClass+"\">");
  out.println (" <td class=\"label\">"+cssClass+"</td>");
  for (int i = start; i < end; i++) {
    if (seqMatches (seq1, seq2, i)) {
      out.print ("<td>");
    } else {
      out.print ("<td class=\"error\">");
    }
    out.print (seq1.get(i));
    out.print ("</td>");
  }
  out.println (" </tr>");
}

// Emits a "features" row: for each position where the input/output sequences disagree,
// lists the feature names (and non-1.0 values) from the FeatureVector at that position;
// matching positions get an empty cell.
private static void outputFeatures (PrintWriter out, FeatureVectorSequence fvs, Sequence in, Sequence output, int start, int end)
{
  out.println (" <tr class=\"features\">\n<td class=\"label\">Features</td>");
  for (int i = start; i < end; i++) {
    if (!seqMatches (in, output, i)) {
      out.print ("<td>");
      FeatureVector fv = fvs.getFeatureVector (i);
      for (int k = 0; k < fv.numLocations (); k++) {
        out.print (fv.getAlphabet ().lookupObject (fv.indexAtLocation (k)));
        // Only print the value when it is not the default 1.0 (binary feature).
        if (fv.valueAtLocation (k) != 1.0) { out.print (" "+fv.valueAtLocation (k)); }
        out.println ("<br />");
      }
      out.println ("</td>");
    } else {
      out.println ("<td></td>");
    }
  }
  out.println (" </tr>");
}

// True when the two sequences hold string-equal elements at position i.
private static boolean seqMatches (Sequence seq1, Sequence seq2, int i)
{
  return seq1.get(i).toString().equals (seq2.get(i).toString());
}

// True when seqMatches holds at every position in [start, end).
private static boolean allSeqMatches (Sequence seq1, Sequence seq2, int start, int end)
{
  for (int i = start; i < end; i++) {
    if (!seqMatches (seq1, seq2, i)) return false;
  }
  return true;
}

// Convenience overload: PrintStream, no lattice display.
// NOTE(review): the local 'writer' is created but never used — the call below passes
// 'out' (the PrintStream), which resolves to the (PrintStream, boolean) overload that
// wraps it again.  Behavior is correct but the dead local could be removed.
public static void extraction2html (Extraction extraction, CRFExtractor extor, PrintStream out)
{
  PrintWriter writer = new PrintWriter (new OutputStreamWriter (out), true);
  extraction2html (extraction, extor, out, false);
}

// Convenience overload: PrintWriter, no lattice display.
public static void extraction2html (Extraction extraction, CRFExtractor extor, PrintWriter out)
{
  extraction2html (extraction, extor, out, false);
}

// Wraps the PrintStream in an auto-flushing PrintWriter and delegates.
public static void extraction2html (Extraction extraction, CRFExtractor extor, PrintStream out, boolean showLattice)
{
  PrintWriter writer = new PrintWriter (new OutputStreamWriter (out), true);
  extraction2html (extraction, extor, writer, showLattice);
}

// Writes an HTML page for every document in the extraction.  When showLattice is
// false, each document links to "lattice.html" for its detailed view.
// (writeHeader/writeFooter/lattice2html are defined earlier in this file.)
public static void extraction2html (Extraction extraction, CRFExtractor extor, PrintWriter out, boolean showLattice)
{
  writeHeader (out);
  for (int i = 0; i < extraction.getNumDocuments (); i++) {
    DocumentExtraction docextr = extraction.getDocumentExtraction (i);
    String desc = docextr.getName();
    String doc = ((CharSequence) docextr.getDocument ()).toString();
    ExtorInfo info = infoForDoc (doc, desc, "N"+i, docextr, extor, showLattice);
    if (!showLattice) info.link = "lattice.html";
    lattice2html (out, info);
  }
  writeFooter (out);
}

// Bundle of everything needed to render one document: the tokenized input, the
// predicted and true label sequences, and (optionally) per-token features and the
// Viterbi lattice for detailed display.
private static class ExtorInfo {

  TokenSequence input;
  Sequence predicted;
  LabelSequence target;
  FeatureVectorSequence fvs;
  MaxLattice lattice;
  Sequence bestStates;
  String link; // If non-null, name of HTML file to use for cross-links
  String desc;
  String idx;

  public ExtorInfo (TokenSequence input, Sequence predicted, LabelSequence target, String desc, String idx)
  {
    this.input = input;
    this.predicted = predicted;
    this.target = target;
    this.desc = desc;
    this.idx = idx;
  }
}

// Builds an ExtorInfo for one document; when showLattice is set, also re-runs the
// feature pipe and computes the max (Viterbi) lattice so per-token detail can be shown.
private static ExtorInfo infoForDoc (String doc, String desc, String idx, DocumentExtraction docextr, CRFExtractor extor, boolean showLattice)
{
  // Instance c2 = new Instance (doc, null, null, null, extor.getTokenizationPipe ());
  // TokenSequence input = (TokenSequence) c2.getData ();
  TokenSequence input = (TokenSequence) docextr.getInput ();
  LabelSequence target = docextr.getTarget ();
  Sequence predicted = docextr.getPredictedLabels ();
  ExtorInfo info = new ExtorInfo (input, predicted, target, desc, idx);
  if (showLattice == true) {
    CRF crf = extor.getCrf();
    // xxx perhaps the next two lines could be a transducer method???
    Instance carrier = extor.getFeaturePipe().pipe(new Instance (input, null, null, null));
    info.fvs = (FeatureVectorSequence) carrier.getData ();
    info.lattice = new MaxLatticeDefault (crf, (Sequence) carrier.getData(), null);
    info.bestStates = info.lattice.bestOutputSequence();
  }
  return info;
}

// Lattice files get too large if too many instances are written to one file
private static final int EXTRACTIONS_PER_FILE = 25;

// Compares two extractions over the same documents and writes to 'dir':
// errors.html (summary of disagreements) plus lattice-<start>.html pages, each
// covering EXTRACTIONS_PER_FILE documents of detailed lattice output.
public static void viewDualResults (File dir, Extraction e1, CRFExtractor extor1, Extraction e2, CRFExtractor extor2)
  throws IOException
{
  if (e1.getNumDocuments () != e2.getNumDocuments ())
    throw new IllegalArgumentException ("Extractions don't match: different number of docs.");
  PrintWriter errorStr = new PrintWriter (new FileWriter (new File (dir, "errors.html")));
  writeDualExtractions (errorStr, e1, extor1, e2, extor2, 0, e1.getNumDocuments (), false);
  errorStr.close ();
  int max = e1.getNumDocuments ();
  for (int start = 0; start < max; start += EXTRACTIONS_PER_FILE) {
    int end = Math.min (start + EXTRACTIONS_PER_FILE, max);
    PrintWriter latticeStr = new PrintWriter (new FileWriter (new File (dir, "lattice-"+start+".html")));
    writeDualExtractions (latticeStr, e1, extor1, e2, extor2, start, end, true);
    latticeStr.close ();
  }
}

// Maps a document index to the lattice file that contains it.
private static String computeLatticeFname (int docIdx)
{
  int htmlDocNo = docIdx / EXTRACTIONS_PER_FILE; // this will get integer truncated
  int start = EXTRACTIONS_PER_FILE * htmlDocNo;
  return "lattice-"+start+".html";
}

// Writes the documents in [start, end) where the two extractors' predictions differ.
// Documents whose raw text differs between the two extractions are skipped with a warning.
private static void writeDualExtractions (PrintWriter out, Extraction e1, CRFExtractor extor1, Extraction e2, CRFExtractor extor2, int start, int end, boolean showLattice)
{
  writeHeader (out);
  for (int i = start; i < end; i++) {
    DocumentExtraction doc1 = e1.getDocumentExtraction (i);
    DocumentExtraction doc2 = e2.getDocumentExtraction (i);
    String desc = doc1.getName();
    String doc1Str = ((CharSequence) doc1.getDocument ()).toString();
    String doc2Str = ((CharSequence) doc2.getDocument ()).toString();
    if (!doc1Str.equals (doc2Str)) {
      System.err.println ("Skipping document "+i+": Extractions don't match");
      continue;
    }
    Sequence targ1 = doc1.getPredictedLabels ();
    Sequence targ2 = doc2.getPredictedLabels ();
    if (!predictionsMatch (targ1, targ2)) {
      ExtorInfo info1 = infoForDoc (doc1Str, "CRF1::"+desc, "C1I"+i, doc1, extor1, showLattice);
      ExtorInfo info2 = infoForDoc (doc1Str, "CRF2::"+desc, "C2I"+i, doc2, extor2, showLattice);
      if (!showLattice) { // add links from errors.html --> lattice.html
        info1.link = info2.link = computeLatticeFname (i);
      }
      dualLattice2html (out, desc, info1, info2);
    }
  }
  writeFooter (out);
}

// if lattice == null, no alpha, beta values printed
// Renders side-by-side windows (LENGTH tokens wide, defined earlier in this file)
// for every region where the two predictions disagree.
public static void dualLattice2html (PrintWriter out, String desc, ExtorInfo info1, ExtorInfo info2)
{
  assert (info1.predicted.size() == info1.target.size());
  assert (info1.input.size() == info1.predicted.size());
  assert (info2.input.size() == info2.predicted.size());
  assert (info2.predicted.size() == info2.target.size());
  int N = info1.target.size();
  // Windows overlap by one token (step of LENGTH - 1).
  for (int start = 0; start < N; start += LENGTH - 1) {
    int end = Math.min (info1.predicted.size(), start + LENGTH);
    if (!allSeqMatches (info1.predicted, info2.predicted, start, end)) {
      error2html (out, info1, start, end);
      error2html (out, info2, start, end);
    }
  }
}

// True when both prediction sequences have the same length and string-equal
// elements at every position.
private static boolean predictionsMatch (Sequence targ1, Sequence targ2)
{
  if (targ1.size() != targ2.size()) return false;
  for (int i = 0; i < targ1.size(); i++)
    if (!targ1.get(i).toString().equals (targ2.get(i).toString())) return false;
  return true;
}

}
17,539
34.869121
142
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/RegexFieldCleaner.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.extract;

import java.util.regex.Pattern;

/**
 * A field cleaner that removes all occurrences of a given regex.
 * <p>
 * Every maximal substring of the raw field value that matches the supplied
 * pattern is replaced with the empty string; all other text is left as-is.
 *
 * Created: Nov 26, 2004
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: RegexFieldCleaner.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public class RegexFieldCleaner implements FieldCleaner {

  /** Convenience pattern string matching runs of punctuation characters. */
  public static final String REMOVE_PUNCT = "\\p{Punct}+";

  // Compiled pattern whose matches are stripped from field values.
  private final Pattern pattern;

  /**
   * Creates a cleaner from a regex source string.
   * @param regex a regular expression; substrings matching it are removed
   */
  public RegexFieldCleaner (String regex)
  {
    this (Pattern.compile (regex));
  }

  /**
   * Creates a cleaner from an already-compiled pattern.
   * @param regex the pattern whose matches are removed
   */
  public RegexFieldCleaner (Pattern regex)
  {
    pattern = regex;
  }

  /**
   * Returns {@code rawFieldValue} with every match of the pattern deleted.
   */
  public String cleanFieldValue (String rawFieldValue)
  {
    return pattern.matcher (rawFieldValue).replaceAll ("");
  }

}
1,213
28.609756
80
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/HierarchicalTokenizationFilter.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.extract;

import java.util.regex.Pattern;
import java.util.*;

import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.Sequence;

/**
 * Tokenization filter that will create nested spans based on a hierarchical labeling of the data.
 * The labels should be of the form <tt>LBL1[|LBLk]*</tt>.  For example,
 * <pre>
 *   A  A|B  A|B|C  A|B|C  A|B  A   A
 *   w1 w2   w3     w4     w5   w6  w7
 * </pre>
 * will result in LabeledSpans like
 * <tt>&lt;A>w1 &lt;B>w2 &lt;C>w3 w4&lt;/C> w5&lt;/B> w6 w7&lt;/A></tt>
 *
 * Also, labels of the form <tt>&lt;B-field></tt> will force a new instance of the field to begin,
 * even if it is already active.  And prefixes of <tt>I-</tt> are ignored so you can use BIO labeling.
 *
 * Created: Nov 12, 2004
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: HierarchicalTokenizationFilter.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public class HierarchicalTokenizationFilter implements TokenizationFilter {

  // Optional pattern: any label component matching it is dropped before nesting
  // is computed (e.g. to ignore a background tag level).
  Pattern ignorePattern = null;

  public HierarchicalTokenizationFilter ()
  {
  }

  public HierarchicalTokenizationFilter (Pattern ignorePattern)
  {
    this.ignorePattern = ignorePattern;
  }

  /**
   * Converts a labeled token sequence into (possibly nested) LabeledSpans over
   * the original document, using the hierarchical label scheme described above.
   */
  public LabeledSpans constructLabeledSpans (LabelAlphabet dict, Object document, Label backgroundTag,
                                             Tokenization input, Sequence seq)
  {
    LabeledSpans labeled = new LabeledSpans (document);
    addSpansFromTags (labeled, input, seq, dict, backgroundTag);
    return labeled;
  }

  // Records that a span with the given label was opened at token index 'start'
  // and has not yet been closed.
  private static class TagStart {
    int start;
    Label label;

    public TagStart (int start, Label label)
    {
      this.start = start;
      this.label = label;
    }
  }

  // Walks the tag sequence, maintaining a stack (openTags) of currently-open
  // hierarchical levels.  At each token the current split label is compared with
  // the previous one: levels that ended are popped and emitted as spans, new
  // levels are pushed.  Any levels still open at the end are closed at the final
  // token index.
  private void addSpansFromTags (LabeledSpans labeled, Tokenization input, Sequence tags, LabelAlphabet dict,
                                 Label backgroundTag)
  {
    int i = 0;
    LinkedList openTags = new LinkedList();
    String[] lastTagSplit = new String [0];
    while (i < tags.size()) {
      Label thisTag = dict.lookupLabel (tags.get(i).toString());
      String[] thisTagSplit = splitTag (thisTag);
      int numToClose = compareSplitTags (thisTagSplit, lastTagSplit);

      // close all that need to be closed
      while (numToClose > 0) {
        TagStart tagStart = (TagStart) openTags.removeLast ();
        addLabeledSpan (labeled, input, tagStart, i, backgroundTag);
        numToClose--;
      }

      // open all that need to be opened
      for (int tidx = openTags.size (); tidx < thisTagSplit.length; tidx++) {
        openTags.add (new TagStart (i, dict.lookupLabel (thisTagSplit [tidx])));
      }

      lastTagSplit = thisTagSplit;
      i++;
    }

    // Close all remaining tags
    while (!openTags.isEmpty ()) {
      TagStart tagStart = (TagStart) openTags.removeLast ();
      addLabeledSpan (labeled, input, tagStart, i, backgroundTag);
    }
  }

  // Emits the span [tagStart.start, end) with tagStart's label; the span is marked
  // as background iff its label is identical (==) to backgroundTag.
  private void addLabeledSpan (LabeledSpans labeled, Tokenization input, TagStart tagStart, int end,
                               Label backgroundTag)
  {
    Span span = input.subspan (tagStart.start, end);
    Label splitTag = tagStart.label;
    labeled.add (new LabeledSpan (span, splitTag, splitTag == backgroundTag));
  }

  // Returns how many levels of the previous split label must be closed given the
  // current one.  Scans from the deepest previous level upward until a level that
  // continues is found; a B- prefix forces a close/reopen even if the name matches.
  // Afterwards verifies that all remaining (shallower) levels agree.
  private int compareSplitTags (String[] thisTagSplit, String[] lastTagSplit)
  {
    int idx = lastTagSplit.length - 1;
    for (; idx >= 0; idx--) {
      if (idx >= thisTagSplit.length) continue;
      String thisTag = thisTagSplit [idx];
      if (isBeginName (thisTag)) continue;
      if (matches (lastTagSplit [idx], thisTag)) break;
    }
    int numToClose = lastTagSplit.length - idx - 1;

    // sanity check
    while (idx >= 0) {
      if (!matches (thisTagSplit[idx], lastTagSplit [idx])) {
        throw new IllegalArgumentException ("Tags don't match.");
      }
      idx--;
    }

    return numToClose;
  }

  // Label-name equality ignoring B-/I- prefixes.
  private boolean matches (String str1, String str2)
  {
    return trim (str1).equals (trim (str2));
  }

  // Strips a leading "B-" or "I-" from a label component, if present.
  private String trim (String name)
  {
    if (isBeginName (name) || isInsideName (name))
      return (name.substring (2));
    else
      return name;
  }

  // Splits a hierarchical label on '|' and drops components matching ignorePattern.
  private String[] splitTag (Label tag)
  {
    String name = tag.toString ();
    List split1 = new ArrayList (Arrays.asList (name.split ("\\|")));
    Iterator it = split1.iterator ();
    while (it.hasNext()) {
      String str = (String) it.next();
      if (ignorePattern != null && ignorePattern.matcher (str).matches ())
        it.remove ();
    }
    return (String[]) split1.toArray (new String[0]);
  }

  private boolean isBeginName (String name)
  {
    return name.startsWith ("B-");
  }

  private boolean isInsideName (String name)
  {
    return name.startsWith ("I-");
  }

}
5,259
29.404624
110
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/DefaultTokenizationFilter.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.extract;

import java.io.Serializable;

import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.Sequence;

/**
 * Default tokenization filter: groups maximal runs of identically-labeled
 * tokens into flat (non-nested) LabeledSpans, inserting background spans to
 * cover any document text between consecutive labeled spans.
 *
 * Created: Nov 12, 2004
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: DefaultTokenizationFilter.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public class DefaultTokenizationFilter implements TokenizationFilter, Serializable {

  /**
   * Converts a labeled token sequence into LabeledSpans over the document.
   * Adjacent tokens with the same label are merged into a single span.
   */
  public LabeledSpans constructLabeledSpans (LabelAlphabet dict, Object document, Label backgroundTag,
                                             Tokenization input, Sequence seq)
  {
    LabeledSpans labeled = new LabeledSpans (document);
    addSpansFromTags (labeled, input, seq, dict, backgroundTag);
    return labeled;
  }

  // Scans the tag sequence; for each maximal run of tokens sharing a label,
  // emits one span.  'docidx' tracks the character index of the end of the last
  // emitted span so gaps can be filled with background spans.
  // Label comparison uses == , which relies on LabelAlphabet interning labels.
  private void addSpansFromTags (LabeledSpans labeled, Tokenization input, Sequence tags,
                                 LabelAlphabet dict, Label backgroundTag)
  {
    int i = 0;
    int docidx = 0;
    while (i < tags.size()) {
      Label thisTag = dict.lookupLabel (tags.get(i).toString());
      int startTokenIdx = i;
      // Advance i past all tokens carrying the same label.
      while (i < tags.size()) {
        Label nextTag = dict.lookupLabel (tags.get(i).toString ());
        if (thisTag != nextTag) break;
        i++;
      }
      int endTokenIdx = i;
      // NOTE(review): assumes input.subspan() returns StringSpan instances —
      // i.e. that the Tokenization is over a character-based document.
      Span span = input.subspan(startTokenIdx, endTokenIdx);
      addBackgroundIfNecessary (labeled, (StringSpan) span, docidx, backgroundTag);
      docidx = ((StringSpan) span).getEndIdx ();
      labeled.add (new LabeledSpan (span, thisTag, thisTag == backgroundTag));
    }
  }

  // If there is document text between character index 'docidx' and the start of
  // 'span', covers it with a background-labeled span.
  private void addBackgroundIfNecessary (LabeledSpans labeled, StringSpan span, int docidx, Label background)
  {
    int nextIdx = span.getStartIdx ();
    if (docidx < nextIdx) {
      Span newSpan = new StringSpan ((CharSequence) span.getDocument (), docidx, nextIdx);
      labeled.add (new LabeledSpan (newSpan, background, true));
    }
  }
}
2,379
36.777778
110
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/FieldComparator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; /** * Interface for functions that compares extracted values of a field to see * if they match. These are used by the evaluation metrics (e.g., * @link{PerDocumentF1Evaluator}) to see if the extraction is correct. * * Created: Nov 23, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: FieldComparator.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public interface FieldComparator { /** * Returns true if the given two slot fillers match. */ public boolean matches (String fieldVal1, String fieldVal2); }
1,023
36.925926
78
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/ExtractionEvaluator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.extract;

/**
 * Interface for objects that evaluate the quality of an {@link Extraction},
 * e.g., by computing precision/recall against the extraction's true targets.
 *
 * Created: Oct 8, 2004
 *
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
 * @version $Id: ExtractionEvaluator.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public interface ExtractionEvaluator {

  /** Evaluates the given extraction; how results are reported is implementation-defined. */
  public void evaluate (Extraction extraction);

}
729
35.5
82
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/Tokenization.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/**
 @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
 */

package cc.mallet.extract;

import cc.mallet.types.*;

/**
 * A sequence of tokens, each of which is a {@link Span} into a single
 * underlying document.
 */
public interface Tokenization extends Sequence //??
{
  /**
   * Returns the document of which this is a tokenization.
   */
  public Object getDocument ();

  /** Returns the span (document region) of the i-th token. */
  public Span getSpan (int i);

  /** Returns a span formed by concatenating the spans from start to end.
   *  In more detail:
   *  <ul>
   *   <li>The start of the new span will be the start index of <tt>getSpan(start)</tt>.
   *   <li>The end of the new span will be the start index of <tt>getSpan(end)</tt>.
   *   <li>Unless <tt>start == end</tt>, the new span will completely include <tt>getSpan(start)</tt>.
   *   <li>The new span will never intersect <tt>getSpan(end)</tt>
   *   <li>If <tt>start == end</tt>, then the new span contains no text.
   *  </ul>
   *
   * @param start The index of the first token in the new span (inclusive).
   *   This is an index of a token, *not* an index into the document.
   * @param end The index of the first token in the new span (exclusive).
   *   This is an index of a token, *not* an index into the document.
   * @return A span into this tokenization's document
   */
  Span subspan (int start, int end);

}
1,695
37.545455
102
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/DocumentViewer.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.extract;

import java.io.File;
import java.io.PrintWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.util.ColorUtils;

/**
 * Diagnosis class that outputs HTML pages that allows you to view errors on a more
 * global per-instance basis.
 *
 * Created: Mar 30, 2005
 *
 * @author <A HREF="mailto:[email protected]>[email protected]</A>
 * @version $Id: DocumentViewer.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public class DocumentViewer {

  // File names of the three alternate stylesheets written by outputStylesheets;
  // the viewer can switch between them to highlight agreement, predicted fields,
  // or true fields.
  private static final String DOC_ERRS_CSS_FNAME = "docerrs.css";
  private static final String DOC_ERRS_PRED_CSS_FNAME = "docerrs-by-pred.css";
  private static final String DOC_ERRS_TRUE_CSS_FNAME = "docerrs-by-true.css";
  // Color saturation used for the per-field rainbow palette.
  private static final double SATURATION = 0.4;

  // Pairs up two parallel LabeledSpans lists (predicted and true) of equal length.
  private static class DualLabeledSpans {
    DualLabeledSpans (LabeledSpans ls1, LabeledSpans ls2) { ls = new LabeledSpans[] { ls1, ls2 }; }

    private LabeledSpans[] ls;

    int size () { return ls[0].size(); }

    // t = span index, i = which list (0 = first/predicted, 1 = second/true)
    LabeledSpan get (int t, int i) { return ls[i].getLabeledSpan (t); }
  }

  /**
   * Writes several HTML files describing a given extraction.  Each HTML file shows an entire
   *  document, with the extracted fields color-coded.
   * @param directory Directory to write files to
   * @param extraction Extraction to describe
   * @throws IOException
   */
  public static void writeExtraction (File directory, Extraction extraction)
          throws IOException
  {
    outputIndex (directory, extraction);
    outputStylesheets (directory, extraction);
    outputDocuments (directory, extraction);
  }

  // Writes the three alternate stylesheets: agreement coloring (correct/wrong/
  // false-neg/false-pos), per-predicted-field colors, and per-true-field colors.
  private static void outputStylesheets (File directory, Extraction extraction)
          throws IOException
  {
    // ERRS css
    PrintWriter out = new PrintWriter (new FileWriter (new File (directory, DOC_ERRS_CSS_FNAME)));
    out.println (".tf_legend { border-style: dashed; border-width: 2px; padding: 10px; padding-top: 0ex; float: right; margin:2em; }");
    out.println (".class_legend { visibility: hidden; }");
    out.println (".correct { background-color:#33FF33; }");
    out.println (".wrong { background-color:pink }");
    out.println (".true { background-color:#99CCFF; }");
    out.println (".pred { background-color:#FFFF66 }");
    out.close ();

    //PRED css
    LabelAlphabet dict = extraction.getLabelAlphabet ();
    String[] fields = determineFieldNames (dict);
    String[] colors = ColorUtils.rainbow (fields.length, (float) SATURATION, 1);
    out = new PrintWriter (new FileWriter (new File (directory, DOC_ERRS_PRED_CSS_FNAME)));
    out.println (".class_legend { border-style: dashed; border-width: 2px; padding: 10px; padding-top: 0ex; float: right; margin:2em; }");
    out.println (".tf_legend { visibility: hidden; }");
    for (int i = 0; i < fields.length; i++) {
      out.println (".pred_"+fields[i]+" { background-color:"+colors[i]+"; }");
    }
    out.close ();

    //TRUE css
    out = new PrintWriter (new FileWriter (new File (directory, DOC_ERRS_TRUE_CSS_FNAME)));
    out.println (".class_legend { border-style: dashed; border-width: 2px; padding: 10px; padding-top: 0ex; float: right; margin:2em; }");
    out.println (".tf_legend { visibility: hidden; }");
    for (int i = 0; i < fields.length; i++) {
      out.println (".true_"+fields[i]+" { background-color:"+colors[i]+"; }");
    }
    out.close ();
  }

  // Writes extraction<i>.html for every document in the extraction.
  private static void outputDocuments (File directory, Extraction extraction)
          throws IOException
  {
    for (int i = 0; i < extraction.getNumDocuments (); i++) {
      PrintWriter out = new PrintWriter (new FileWriter (new File (directory, "extraction"+i+".html")));
      outputOneDocument (out, extraction.getDocumentExtraction (i));
      out.close ();
    }
  }

  // Renders one document: walks the aligned predicted/true span pairs, wrapping
  // each span's text in nested SPANs carrying the predicted-field class, the
  // true-field class, and an agreement class (correct/wrong/pred/true).
  private static void outputOneDocument (PrintWriter out, DocumentExtraction docExtr)
  {
    String name = docExtr.getName ();

    out.println ("<HTML><HEAD><TITLE>"+name+": Extraction from Document</TITLE>");
    out.println ("<LINK REL=\"stylesheet\" TYPE=\"text/css\" HREF=\""+DOC_ERRS_CSS_FNAME+"\" title=\"Agreement\" />");
    out.println ("<LINK REL=\"stylesheet\" TYPE=\"text/css\" HREF=\""+DOC_ERRS_PRED_CSS_FNAME+"\" title=\"Pred\" />");
    out.println ("<LINK REL=\"stylesheet\" TYPE=\"text/css\" HREF=\""+DOC_ERRS_TRUE_CSS_FNAME+"\" title=\"True\" />");
    out.println ("</HEAD><BODY>");

    outputClassLegend (out, docExtr.getExtractedSpans ().getLabeledSpan (0).getLabel ().getLabelAlphabet ());
    outputRightWrongLegend (out);

    DualLabeledSpans spans = intersectSpans (docExtr);
    for (int i = 0; i < spans.size(); i++) {
      LabeledSpan predSpan = spans.get (i, 0);
      LabeledSpan trueSpan = spans.get (i, 1);

      Label predLabel = predSpan.getLabel ();
      Label trueLabel = trueSpan.getLabel ();

      boolean predNonBgrnd = !predSpan.isBackground ();
      boolean trueNonBgrnd = !trueSpan.isBackground ();
      boolean isBackground = !predNonBgrnd && !trueNonBgrnd;

      // Agreement class: correct/wrong when both non-background, pred = false
      // positive, true = false negative, null when both background.
      String spanClass = null;
      if (predNonBgrnd && trueNonBgrnd) {
        if (predLabel == trueLabel) {
          spanClass = "correct";
        } else {
          spanClass = "wrong";
        }
      } else if (predNonBgrnd) {
        spanClass = "pred";
      } else if (trueNonBgrnd) {
        spanClass = "true";
      }

      if (!isBackground) out.print ("<SPAN CLASS=\"pred_"+predLabel+"\">");
      if (!isBackground) out.print ("<SPAN CLASS=\"true_"+trueLabel+"\">");
      if (spanClass != null) {
        out.print ("<SPAN CLASS=\""+spanClass+"\">");
      }

      // Minimal HTML-escaping of the document text; newlines become paragraphs.
      String text = predSpan.getSpan ().getText ();
      text = text.replaceAll ("<", "&lt;");
      text = text.replaceAll ("\n", "\n<P>");
      out.print (text);

      if (spanClass != null) {
        out.print ("</SPAN>");
      }
      if (!isBackground) out.print ("</SPAN></SPAN>");
      out.println ();
    }
    out.println ("</BODY></HTML>");
  }

  // Legend for the agreement stylesheet.
  private static void outputRightWrongLegend (PrintWriter out)
  {
    out.println ("<DIV CLASS=\"tf_legend\"><B>LEGEND</B><BR>");
    out.println ("<SPAN CLASS='correct'>Correct</SPAN><BR />");
    out.println ("<SPAN CLASS='wrong'>Wrong</SPAN><BR />");
    out.println ("<SPAN CLASS='true'>False Negative</SPAN> (True field but predicted background)<BR />");
    out.println ("<SPAN CLASS='pred'>False Positive</SPAN> (True background but predicted field)<BR />");
    out.println ("</DIV>");
  }

  // Legend for the per-field stylesheets: one colored line per field name.
  private static void outputClassLegend (PrintWriter out, LabelAlphabet dict)
  {
    out.println ("<DIV CLASS=\"class_legend\">");
    out.println ("<H4>LEGEND</H4>");
    String[] fields = determineFieldNames (dict);
    String[] colors = ColorUtils.rainbow (fields.length, (float) SATURATION, 1);
    for (int i = 0; i < fields.length; i++) {
      out.println ("<SPAN STYLE=\"background-color:"+colors[i]+"\">"+fields[i]+"</SPAN><BR />");
    }
    out.println ("</DIV>");
  }

  // Field names are all labels in the alphabet except B-/I- (BIO) variants.
  private static String[] determineFieldNames (LabelAlphabet dict)
  {
    List l = new ArrayList ();
    for (int i = 0; i < dict.size (); i++) {
      String lname = dict.lookupLabel (i).toString ();
      if (!lname.startsWith ("B-") && !lname.startsWith ("I-")) {
        l.add (lname);
      }
    }
    return (String[]) l.toArray (new String [l.size ()]);
  }

  // Two-pointer sweep over the predicted and true span lists, producing two
  // parallel lists of pairwise intersections, so that element t of each output
  // covers the same document region.
  private static DualLabeledSpans intersectSpans (DocumentExtraction docExtr)
  {
    int predIdx = 0;
    int trueIdx = 0;
    LabeledSpans trueSpans = docExtr.getTargetSpans ();
    LabeledSpans predSpans = docExtr.getExtractedSpans ();
    LabeledSpans retPredSpans = new LabeledSpans (predSpans.getDocument ());
    LabeledSpans retTrueSpans = new LabeledSpans (predSpans.getDocument ());
    while ((predIdx < predSpans.size()) && (trueIdx < trueSpans.size ())) {
      LabeledSpan predSpan = predSpans.getLabeledSpan (predIdx);
      LabeledSpan trueSpan = trueSpans.getLabeledSpan (trueIdx);
      LabeledSpan newPredSpan = (LabeledSpan) predSpan.intersection (trueSpan);
      LabeledSpan newTrueSpan = (LabeledSpan) trueSpan.intersection (predSpan);
      retPredSpans.add (newPredSpan);
      retTrueSpans.add (newTrueSpan);
      // Advance whichever span(s) end first; both advance on a tie.
      if (predSpan.getEndIdx () <= trueSpan.getEndIdx ()) {
        predIdx++;
      }
      if (trueSpan.getEndIdx () <= predSpan.getEndIdx ()) {
        trueIdx++;
      }
    }
    assert (retPredSpans.size() == retTrueSpans.size());
    return new DualLabeledSpans (retPredSpans, retTrueSpans);
  }

  // Writes index.html, an ordered list linking to every per-document page.
  private static void outputIndex (File directory, Extraction extraction)
          throws IOException
  {
    PrintWriter out = new PrintWriter (new FileWriter (new File (directory, "index.html")));
    out.println ("<HTML><HEAD><TITLE>Extraction Results</TITLE></HEAD><BODY><OL>");
    for (int i = 0; i < extraction.getNumDocuments(); i++) {
      String name = extraction.getDocumentExtraction (i).getName ();
      out.println (" <LI><A HREF=\"extraction"+i+".html\">"+name+"</A></LI>");
    }
    out.println ("</OL></BODY></HTML>");
    out.close ();
  }

}
9,445
38.358333
138
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/TransducerExtractionConfidenceEstimator.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/**
 @author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
 */

package cc.mallet.extract;

import java.io.*;
import java.util.*;

import cc.mallet.fst.*;
import cc.mallet.fst.confidence.*;
import cc.mallet.pipe.*;
import cc.mallet.types.*;

/**
 * Estimates the confidence in the labeling of a LabeledSpan using a
 * TransducerConfidenceEstimator.
 * <p>
 * NOTE(review): implements Serializable but declares no serialVersionUID, so the
 * serialized form is compiler-dependent — consider adding one.
 */
public class TransducerExtractionConfidenceEstimator extends ExtractionConfidenceEstimator implements Serializable
{
  TransducerConfidenceEstimator confidenceEstimator;
  Pipe featurePipe;

  /**
   * @param confidenceEstimator scores individual segments against a lattice
   * @param startTags   currently unused — TODO confirm whether intentionally ignored
   * @param continueTags currently unused — TODO confirm whether intentionally ignored
   * @param featurePipe pipe used to convert a Tokenization into a feature-vector sequence
   */
  public TransducerExtractionConfidenceEstimator (TransducerConfidenceEstimator confidenceEstimator,
                                                  Object[] startTags,
                                                  Object[] continueTags,
                                                  Pipe featurePipe) {
    super();
    this.confidenceEstimator = confidenceEstimator;
    this.featurePipe = featurePipe;
  }

  /**
   * Sets a confidence value on every non-background LabeledSpan of the given
   * document extraction, using a single SumLattice over the whole document.
   */
  public void estimateConfidence (DocumentExtraction documentExtraction) {
    Tokenization input = documentExtraction.getInput();

    // WARNING: input Tokenization will likely already have many
    // features appended from the last time it was passed through a
    // featurePipe. To avoid a redundant calculation of features, the
    // caller may want to set this.featurePipe =
    // TokenSequence2FeatureVectorSequence
    Instance carrier = this.featurePipe.pipe(new Instance(input, null, null, null));
    Sequence pipedInput = (Sequence) carrier.getData();
    Sequence prediction = documentExtraction.getPredictedLabels();
    LabeledSpans labeledSpans = documentExtraction.getExtractedSpans();
    // Forward-backward lattice computed once and shared by all segments below.
    SumLatticeDefault lattice =
      new SumLatticeDefault (this.confidenceEstimator.getTransducer(), pipedInput);
    for (int i=0; i < labeledSpans.size(); i++) {
      LabeledSpan span = labeledSpans.getLabeledSpan(i);
      if (span.isBackground())
        continue;
      int[] segmentBoundaries = getSegmentBoundaries(input, span);
      Segment segment = new Segment(pipedInput, prediction, prediction,
                                    segmentBoundaries[0], segmentBoundaries[1],
                                    null, null);
      span.setConfidence(confidenceEstimator.estimateConfidenceFor(segment, lattice));
    }
  }

  /** Convert the indices of a LabeledSpan into indices for a Tokenization.
   *  A token belongs to the segment iff its start character index falls inside
   *  the span's character range.
   *  @return array of size two, where first index is start Token,
   *  second is end Token, inclusive
   */
  private int[] getSegmentBoundaries (Tokenization tokens, LabeledSpan labeledSpan) {
    int startCharIndex = labeledSpan.getStartIdx();
    int endCharIndex = labeledSpan.getEndIdx()-1;
    int[] ret = new int[]{-1,-1};
    for (int i=0; i < tokens.size(); i++) {
      int charIndex = tokens.getSpan(i).getStartIdx();
      if (charIndex <= endCharIndex && charIndex >= startCharIndex) {
        if (ret[0] == -1) {
          ret[0] = i;
          ret[1] = i;
        }
        else ret[1] = i;
      }
    }
    if (ret[0] == -1 || ret[1] == -1)
      throw new IllegalArgumentException("Unable to find segment boundaries from span " + labeledSpan);
    return ret;
  }
}
3,638
38.554348
114
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/ExtractionConfidenceEstimator.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/**
 @author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
 */

package cc.mallet.extract;

/**
 * Estimates the confidence in the labeling of a LabeledSpan.
 * <p>
 * Subclasses supply the per-document scoring; the whole-extraction entry point
 * below just applies it to each document in turn.
 */
abstract public class ExtractionConfidenceEstimator
{
  /** Estimates confidence for every document contained in {@code extraction}. */
  public void estimateConfidence (Extraction extraction)
  {
    int numDocuments = extraction.getNumDocuments ();
    for (int docIndex = 0; docIndex < numDocuments; docIndex++) {
      estimateConfidence (extraction.getDocumentExtraction (docIndex));
    }
  }

  /** Attaches confidence scores to the labeled spans of one document extraction. */
  abstract public void estimateConfidence (DocumentExtraction documentExtraction);
}
940
31.448276
86
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/Extraction.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/**
 @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
 */

package cc.mallet.extract;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.io.PrintWriter;

import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.Sequence;

/**
 * The results of doing information extraction.  This is designed to handle
 *  field extraction from a single document, or relation extraction and
 *  coreference from multiple documents;
 */
public class Extraction {

  private Extractor extractor;
  private List byDocs = new ArrayList (); // List of DocumentExtractions
  private List records = new ArrayList ();

  // If the DocumentExtractions contain true targets (i.e., they're labeled testing instances,
  //  then these are the true records obtained from those
  List trueRecords = new ArrayList ();

  private LabelAlphabet dict;

  /**
   * Creates an empty Extraction option.  DocumentExtractions can be added later by
   *  the addDocumentExtraction method.
   */
  public Extraction (Extractor extractor, LabelAlphabet dict)
  {
    this.extractor = extractor;
    this.dict = dict;
  }

  /**
   * Creates an extration given a sequence output by some kind of per-sequece labeler, like an
   *  HMM or a CRF.  The extraction will contain a single document.
   */
  public Extraction (Extractor extractor, LabelAlphabet dict, String name, Tokenization input, Sequence output, String background)
  {
    this.extractor = extractor;
    this.dict = dict;
    DocumentExtraction docseq = new DocumentExtraction (name, dict, input, output, background);
    addDocumentExtraction (docseq);
  }

  // Adds the per-document extraction, builds its predicted Record, and — when the
  // document carries true target spans — a parallel "TRUE:"-prefixed record.
  public void addDocumentExtraction (DocumentExtraction docseq)
  {
    byDocs.add (docseq);
    records.add (new Record (docseq.getName (), docseq.getExtractedSpans ()));
    if (docseq.getTargetSpans () != null) {
      trueRecords.add (new Record ("TRUE:"+docseq.getName (), docseq.getTargetSpans ()));
    }
  }

  public Record getRecord (int idx)
  {
    return (Record) records.get (idx);
  }

  public int getNumRecords ()
  {
    return records.size();
  }

  public DocumentExtraction getDocumentExtraction(int idx)
  {
    return (DocumentExtraction) byDocs.get (idx);
  }

  public int getNumDocuments ()
  {
    return byDocs.size();
  }

  public Extractor getExtractor ()
  {
    return extractor;
  }

  // NOTE(review): indexed by document number, so this assumes one record per
  // document and that every document had target spans.
  public Record getTargetRecord (int docnum)
  {
    return (Record) trueRecords.get (docnum);
  }

  public LabelAlphabet getLabelAlphabet ()
  {
    return dict;
  }

  /** Applies the given cleaner to every field of every record (predicted and true). */
  public void cleanFields (FieldCleaner cleaner)
  {
    Iterator it = records.iterator ();
    while (it.hasNext ()) {
      cleanRecord ((Record) it.next (), cleaner);
    }
    it = trueRecords.iterator ();
    while (it.hasNext ()) {
      cleanRecord ((Record) it.next (), cleaner);
    }
  }

  private void cleanRecord (Record record, FieldCleaner cleaner)
  {
    Iterator it = record.fieldsIterator ();
    while (it.hasNext ()) {
      Field field = (Field) it.next ();
      field.cleanField (cleaner);
    }
  }

  /** Dumps all predicted records, one field value per line, for debugging. */
  public void print (PrintWriter writer)
  {
    Iterator it = records.iterator ();
    writer.println ("***EXTRACTION***");
    while (it.hasNext ()) {
      Record record = (Record) it.next ();
      writer.println ("**RECORD "+record.getName ());
      Iterator fit = record.fieldsIterator ();
      while (fit.hasNext ()) {
        Field field = (Field) fit.next ();
        writer.println (field.getName ());
        for (int fidx = 0; fidx < field.numValues (); fidx++) {
          // Flatten multi-line values onto one line for readability.
          String val = field.value (fidx).replaceAll ("\n", " ");
          writer.print (" ==> "+val+"\n");
        }
        writer.println ();
      }
    }
    writer.println ("***END EXTRACTION***");
  }
}
4,151
28.870504
130
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/PerDocumentF1Evaluator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; import java.io.PrintStream; import java.io.OutputStream; import java.io.PrintWriter; import java.io.OutputStreamWriter; import java.text.DecimalFormat; import java.util.Iterator; import cc.mallet.types.Label; import cc.mallet.types.LabelAlphabet; import cc.mallet.types.MatrixOps; /** * Created: Oct 8, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: PerDocumentF1Evaluator.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class PerDocumentF1Evaluator implements ExtractionEvaluator { private FieldComparator comparator = new ExactMatchComparator (); private PrintStream errorOutputStream = null; public FieldComparator getComparator () { return comparator; } public void setComparator (FieldComparator comparator) { this.comparator = comparator; } public PrintStream getErrorOutputStream () { return errorOutputStream; } public void setErrorOutputStream (OutputStream errorOutputStream) { // Work around java bug when wrapping System.out if (errorOutputStream instanceof PrintStream) { this.errorOutputStream = (PrintStream) errorOutputStream; } else { this.errorOutputStream = new PrintStream (errorOutputStream); } } public void evaluate (Extraction extraction) { evaluate (extraction, System.out); } public void evaluate (Extraction extraction, PrintStream out) { evaluate ("", extraction, new PrintWriter (new OutputStreamWriter (out), true)); } public void evaluate (Extraction extraction, PrintWriter out) { evaluate ("", extraction, out); } // Assumes that there are as many records as documents, indexed by docs. 
// Assumes that extractor returns at most one value public void evaluate (String description, Extraction extraction, PrintWriter out) { int numDocs = extraction.getNumDocuments (); assert numDocs == extraction.getNumRecords (); LabelAlphabet dict = extraction.getLabelAlphabet(); int numLabels = dict.size(); int[] numCorr = new int [numLabels]; int[] numPred = new int [numLabels]; int[] numTrue = new int [numLabels]; for (int docnum = 0; docnum < numDocs; docnum++) { Record extracted = extraction.getRecord (docnum); Record target = extraction.getTargetRecord (docnum); // Calc precision Iterator it = extracted.fieldsIterator (); while (it.hasNext ()) { Field predField = (Field) it.next (); Label name = predField.getName (); Field trueField = target.getField (name); int idx = name.getIndex (); numPred [idx]++; if (predField.numValues() > 1) System.err.println ("Warning: Field "+predField+" has more than one extracted value. Picking arbitrarily..."); if (trueField != null && trueField.isValue (predField.value (0), comparator)) { numCorr [idx]++; } else { // We have an error, report if necessary if (errorOutputStream != null) { //xxx TODO: Display name of supporting document errorOutputStream.println ("Error in extraction! Document "+extraction.getDocumentExtraction (docnum).getName ()); errorOutputStream.println ("Predicted "+predField); errorOutputStream.println ("True "+trueField); errorOutputStream.println (); } } } // Calc true it = target.fieldsIterator (); while (it.hasNext ()) { Field trueField = (Field) it.next (); Label name = trueField.getName (); numTrue [name.getIndex ()]++; } } DecimalFormat f = new DecimalFormat ("0.####"); double totalF1 = 0; int totalFields = 0; out.println (description+" per-document F1"); out.println ("Name\tP\tR\tF1"); for (int i = 0; i < numLabels; i++) { double P = (numPred[i] == 0) ? 0 : ((double)numCorr[i]) / numPred [i]; double R = (numTrue[i] == 0) ? 1 : ((double)numCorr[i]) / numTrue [i]; double F1 = (P + R == 0) ? 
0 : (2 * P * R) / (P + R); if ((numPred[i] > 0) || (numTrue[i] > 0)) { totalF1 += F1; totalFields++; } Label name = dict.lookupLabel (i); out.println (name+"\t"+f.format(P)+"\t"+f.format(R)+"\t"+f.format(F1)); } int totalCorr = MatrixOps.sum (numCorr); int totalPred = MatrixOps.sum (numPred); int totalTrue = MatrixOps.sum (numTrue); double P = ((double)totalCorr) / totalPred; double R = ((double)totalCorr) / totalTrue; double F1 = (2 * P * R) / (P + R); out.println ("OVERALL (micro-averaged) P="+f.format(P)+" R="+f.format(R)+" F1="+f.format(F1)); out.println ("OVERALL (macro-averaged) F1="+f.format(totalF1/totalFields)); out.println (); } }
5,221
32.474359
126
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/StringSpan.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.extract; import java.io.ObjectOutputStream; import java.io.ObjectInputStream; import java.io.IOException; import cc.mallet.types.Token; /** A sub-section of a linear string. */ public class StringSpan extends Token implements Span { private CharSequence document; // The larger string of which this is a span. private int start, end; public StringSpan (CharSequence doc, int start, int end) { super (constructTokenText (doc, start, end)); this.document = doc; this.start = start; this.end = end; } public Span intersection (Span r) { StringSpan other = (StringSpan) r; int newStart = Math.max (start, other.start); int newEnd = Math.min (end, other.end); return new StringSpan (document, newStart, newEnd); } private static String constructTokenText (CharSequence doc, int start, int end) { CharSequence subseq = doc.subSequence(start,end); return subseq.toString(); } public Object getDocument () { return document; } public boolean intersects (Span r) { if (!(r instanceof StringSpan)) return false; StringSpan sr = (StringSpan)r; return (sr.document == this.document && !(sr.end < this.start || sr.start > this.end)); } public boolean isSubspan (Span r) { return (r.getDocument() == this.document && (this.start <= r.getStartIdx ()) && (r.getEndIdx () <= this.end)); } public int getStartIdx () { return start; } public int getEndIdx () { return end; } public String toString() { return super.toString() + " span["+start+".."+end+"]"; } // Serialization garbage private static final long 
serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject (ObjectOutputStream out) throws IOException { out.defaultWriteObject (); out.writeInt (CURRENT_SERIAL_VERSION); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject (); int version = in.readInt (); } }
2,584
24.594059
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/Field.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; import java.util.ArrayList; import java.util.List; import java.util.Iterator; import java.util.ListIterator; import cc.mallet.types.Label; /** * Created: Oct 12, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: Field.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class Field { private Label name; private List values = new ArrayList (); private List allSpans = new ArrayList (); public Field (LabeledSpan span) { name = span.getLabel (); addFiller (span); } public Label getName () { return name; } public int numValues () { return values.size (); } public String value (int i) { return (String) values.get (i); } public LabeledSpan span (int i) { return (LabeledSpan) allSpans.get (i); } public void addFiller (LabeledSpan span) { if (name != span.getLabel ()) throw new IllegalArgumentException ("Attempt to fill slot "+name+" with a span of type "+span.getLabel ()); values.add (span.getText ()); allSpans.add (span); } void cleanField (FieldCleaner cleaner) { //??? Should I prevent the same cleaner from running twice? ListIterator it = values.listIterator (); while (it.hasNext()) { String rawValue = (String) it.next (); it.remove (); it.add (cleaner.cleanFieldValue (rawValue)); } } /** * Returns true if <tt>filler</tt> is an exact match to one of the values * of this field. 
*/ public boolean isValue (String filler) { return values.contains (filler); } public boolean isValue (String filler, FieldComparator comper) { for (Iterator it = values.iterator (); it.hasNext ();) { String s = (String) it.next (); if (comper.matches (filler, s)) return true; } return false; } public String toString () { StringBuffer buf = new StringBuffer (); buf.append ("FIELD NAME: "); buf.append (name); buf.append ("\n"); for (Iterator it = values.iterator (); it.hasNext ();) { String s = (String) it.next (); buf.append ("FILLER:"); buf.append (s); buf.append ("\n"); } return buf.toString (); } }
2,633
25.079208
113
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/FieldCleaner.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; /** * Interface for functions that are used to clean up field values after * extraction has been performed. * * Created: Nov 25, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: FieldCleaner.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public interface FieldCleaner { /** * Returns a post-processed version of a field. * @param rawFieldValue * @return A processed string */ String cleanFieldValue (String rawFieldValue); }
947
32.857143
76
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/TokenizationFilter.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; import cc.mallet.types.Label; import cc.mallet.types.LabelAlphabet; import cc.mallet.types.Sequence; /** * Created: Nov 12, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: TokenizationFilter.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public interface TokenizationFilter { /** * Converts a the sequence of labels into a set of labeled spans. Essentially, this converts the * output of sequence labeling into an extraction output. * @param dict * @param document * @param backgroundTag * @param input * @param seq * @return */ LabeledSpans constructLabeledSpans (LabelAlphabet dict, Object document, Label backgroundTag, Tokenization input, Sequence seq); }
1,241
35.529412
99
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/DocumentExtraction.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; import org.jdom.Element; import org.jdom.Document; import org.jdom.Namespace; import org.jdom.Text; import org.jdom.output.XMLOutputter; import cc.mallet.types.*; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; import gnu.trove.THashMap; /** * Created: Oct 12, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: DocumentExtraction.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ //TODO: Add place where user can have general Transducers to change CRF tokenization into LabeledSpans //TODO: Add field for CRF's labeled tokenization public class DocumentExtraction implements Serializable { private Tokenization input; private Sequence predictedLabels; private LabelSequence target; private LabeledSpans extractedSpans; private LabeledSpans targetSpans; private Object document; private Label backgroundTag; private String name; public DocumentExtraction (String name, LabelAlphabet dict, Tokenization input, Sequence predicted, String background) { this (name, dict, input, predicted, null, background, new BIOTokenizationFilter ()); } public DocumentExtraction (String name, LabelAlphabet dict, Tokenization input, Sequence predicted, Sequence target, String background) { this (name, dict, input, predicted, target, background, new BIOTokenizationFilter ()); } public DocumentExtraction (String name, LabelAlphabet dict, Tokenization input, 
Sequence predicted, Sequence target, String background, TokenizationFilter filter) { this.document = input.getDocument (); this.name = name; assert (input.size() == predicted.size()); this.backgroundTag = dict.lookupLabel (background); this.input = input; this.predictedLabels = predicted; this.extractedSpans = filter.constructLabeledSpans (dict, document, backgroundTag, input, predicted); if (target != null) { if (target instanceof LabelSequence) this.target = (LabelSequence) target; this.targetSpans = filter.constructLabeledSpans (dict, document, backgroundTag, input, target); } } public DocumentExtraction (String name, LabelAlphabet dict, Tokenization input, LabeledSpans predictedSpans, LabeledSpans trueSpans, String background) { this.document = input.getDocument (); this.name = name; this.backgroundTag = dict.lookupLabel (background); this.input = input; this.extractedSpans = predictedSpans; this.targetSpans = trueSpans; } public Object getDocument () { return document; } public Tokenization getInput () { return input; } public Sequence getPredictedLabels () { return predictedLabels; } public LabeledSpans getExtractedSpans () { return extractedSpans; } public LabeledSpans getTargetSpans () { return targetSpans; } public LabelSequence getTarget () { return target; } public String getName () { return name; } public Label getBackgroundTag () { return backgroundTag; } //xxx nyi public Span subspan (int start, int end) { throw new UnsupportedOperationException ("not yet implemented."); } public Document toXmlDocument () { return toXmlDocument ("doc", Namespace.NO_NAMESPACE); } /* public Document toXmlDocument (String rootEltName, Namespace ns) { Element element = new Element (rootEltName, ns); for (int i = 0; i < extractedSpans.size(); i++) { LabeledSpan span = (LabeledSpan) extractedSpans.get(i); Label tag = span.getLabel(); if (tag == backgroundTag) { org.jdom.Parent p = element.addContent (span.getText ()); } else { Element field = new Element (tag.toString(), 
ns); field.setText (span.getText ()); element.addContent (field); } } return new Document (element); } */ // does not do non-overlap sanity checking public Document toXmlDocument (String rootEltName, Namespace ns) { ArrayList orderedByStart = new ArrayList (extractedSpans); Collections.sort (orderedByStart, new Comparator () { public int compare (Object o, Object o1) { int start1 = ((Span)o).getStartIdx (); int start2 = ((Span)o1).getStartIdx (); return Double.compare (start1, start2); } } ); ArrayList roots = new ArrayList (orderedByStart); THashMap children = new THashMap (); for (int i = 0; i < orderedByStart.size(); i++) { LabeledSpan child = (LabeledSpan) orderedByStart.get (i); for (int j = i-1; j >= 0; j--) { LabeledSpan parent = (LabeledSpan) orderedByStart.get (j); if (parent.isSubspan (child)) { List childList = (List) children.get (parent); if (childList == null) { childList = new ArrayList (); children.put (parent, childList); } roots.remove (child); childList.add (child); break; } } } CharSequence doc = (CharSequence) document; Span wholeDoc = new StringSpan (doc, 0, doc.length ()); return new Document (generateElement (rootEltName, wholeDoc, roots, children)); } private Element generateElement (String parentName, Span span, List childSpans, THashMap tree) { Element parentElt = new Element (parentName); if (childSpans == null || childSpans.isEmpty ()) { parentElt.setContent (new Text (span.getText ())); } else { List childElts = new ArrayList (childSpans.size()); int start = span.getStartIdx (); int current = 0; for (int i = 0; i < childSpans.size(); i++) { LabeledSpan childSpan = (LabeledSpan) childSpans.get (i); Label childLabel = childSpan.getLabel(); int childStart = childSpan.getStartIdx () - start; if (childStart > current) { childElts.add (new Text (span.getText().substring (current, childStart))); } if (childLabel == backgroundTag) { childElts.add (new Text (childSpan.getText())); } else { String name = childLabel.getEntry ().toString(); List 
grandchildren = (List) tree.get (childSpan); childElts.add (generateElement (name, childSpan, grandchildren, tree)); } current = childSpan.getEndIdx () - start; } if (current < span.getEndIdx ()) childElts.add (new Text (span.getText().substring (current))); parentElt.addContent (childElts); } return parentElt; } public String toXmlString () { Document jdom = toXmlDocument (); XMLOutputter outputter = new XMLOutputter (); return outputter.outputString (jdom); } public int size () { return extractedSpans.size(); } // Serialization garbage private static final long serialVersionUID = 1L; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject(ObjectOutputStream out) throws IOException { out.defaultWriteObject(); out.writeInt(CURRENT_SERIAL_VERSION); } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); in.readInt(); // read version } }
7,945
27.480287
120
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/LabeledSpan.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; import java.io.IOException; import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; import cc.mallet.types.Label; /** * Created: Oct 12, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: LabeledSpan.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ //xxx Maybe this is the same thing as a field?? public class LabeledSpan implements Span, Serializable { private Span span; private Label label; private boolean isBackground; private double confidence; public LabeledSpan (Span span, Label label, boolean isBackground) { this (span, label, isBackground, 1.0); } public LabeledSpan (Span span, Label label, boolean isBackground, double confidence) { this.span = span; this.label = label; this.isBackground = isBackground; this.confidence = confidence; } public Span getSpan () { return span; } public Label getLabel () { return label; } public String getText () { return span.getText (); } public Object getDocument () { return span.getDocument (); } public double getConfidence () { return confidence; } void setConfidence (double c) { this.confidence = c; } public boolean intersects (Span r) { return span.intersects (r); } public boolean isSubspan (Span r) { return span.isSubspan (r); } public Span intersection (Span r) { LabeledSpan other = (LabeledSpan) r; Span newSpan = getSpan ().intersection (other.getSpan ()); return new LabeledSpan (newSpan, label, isBackground, confidence); } public int getEndIdx () { return span.getEndIdx (); } public int getStartIdx () { return 
span.getStartIdx (); } public boolean isBackground () { return isBackground; } public String toString () { return label.toString ()+" [span "+getStartIdx ()+".."+getEndIdx ()+" confidence="+confidence+"]"; } // Serialization garbage private static final long serialVersionUID = 1L; private static final int CURRENT_SERIAL_VERSION = 1; private void writeObject(ObjectOutputStream out) throws IOException { out.defaultWriteObject(); out.writeInt(CURRENT_SERIAL_VERSION); } private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject(); in.readInt(); // read version } }
2,845
21.587302
102
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/CRFExtractor.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; import java.io.*; import java.util.ArrayList; import java.util.Iterator; import cc.mallet.fst.CRF; import cc.mallet.pipe.Noop; import cc.mallet.pipe.Pipe; import cc.mallet.pipe.SerialPipes; import cc.mallet.types.*; /** * Created: Oct 12, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: CRFExtractor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class CRFExtractor implements Extractor { private CRF crf; private Pipe tokenizationPipe; private Pipe featurePipe; private String backgroundTag; private TokenizationFilter filter; public CRFExtractor (CRF crf) { this (crf, new Noop ()); } public CRFExtractor (File crfFile) throws IOException { this (loadCrf(crfFile), new Noop ()); } public CRFExtractor (CRF crf, Pipe tokpipe) { this (crf, tokpipe, new BIOTokenizationFilter ()); } public CRFExtractor (CRF crf, Pipe tokpipe, TokenizationFilter filter) { this (crf, tokpipe, filter, "O"); } public CRFExtractor (CRF crf, Pipe tokpipe, TokenizationFilter filter, String backgroundTag) { this.crf = crf; tokenizationPipe = tokpipe; featurePipe = (Pipe) crf.getInputPipe (); this.filter = filter; this.backgroundTag = backgroundTag; } private static CRF loadCrf (File crfFile) throws IOException { ObjectInputStream ois = new ObjectInputStream( new FileInputStream( crfFile ) ); CRF crf = null; // We shouldn't run into a ClassNotFound exception... 
try { crf = (CRF)ois.readObject(); } catch (ClassNotFoundException e) { System.err.println ("Internal MALLET error: Could not read CRF from file "+crfFile+"\n"+e); e.printStackTrace (); throw new RuntimeException (e); } ois.close(); return crf; } public Extraction extract (Object o) { // I don't think there's a polymorphic way to do this. b/c Java sucks. -cas if (o instanceof Tokenization) { return extract ((Tokenization) o); } else if (o instanceof InstanceList) { return extract ((InstanceList) o); } else { return extract (doTokenize (o)); } } private Tokenization doTokenize (Object obj) { Instance toked = new Instance (obj, null, null, null); tokenizationPipe.pipe (toked); return (Tokenization) toked.getData (); } public Extraction extract (Tokenization spans) { // We assume the input is unpiped. Instance carrier = featurePipe.pipe (new Instance (spans, null, null, null)); Sequence output = crf.transduce ((Sequence) carrier.getData ()); Extraction extraction = new Extraction (this, getTargetAlphabet()); DocumentExtraction docseq = new DocumentExtraction ("Extraction", getTargetAlphabet(), spans, output, null, backgroundTag, filter); extraction.addDocumentExtraction (docseq); return extraction; } public InstanceList pipeInstances (Iterator<Instance> source) { // I think that pipes should be associated neither with InstanceLists, nor // with Instances. -cas InstanceList toked = new InstanceList (tokenizationPipe); toked.addThruPipe (source); InstanceList piped = new InstanceList (getFeaturePipe ()); piped.addThruPipe (toked.iterator()); return piped; } /** Assumes Instance.source contains the Tokenization object. 
*/ public Extraction extract (InstanceList ilist) { Extraction extraction = new Extraction (this, getTargetAlphabet ()); for (int i = 0; i < ilist.size(); i++) { Instance inst = ilist.get(i); Tokenization tok = (Tokenization)inst.getSource(); String name = inst.getName().toString(); Sequence input = (Sequence)inst.getData (); Sequence target = (Sequence)inst.getTarget (); Sequence output = crf.transduce(input); DocumentExtraction docseq = new DocumentExtraction (name, getTargetAlphabet(), tok, output, target, backgroundTag, filter); extraction.addDocumentExtraction (docseq); } return extraction; } public Extraction extract (Iterator<Instance> source) { Extraction extraction = new Extraction (this, getTargetAlphabet ()); // Put all the instances through both pipes, then get viterbi path InstanceList tokedList = new InstanceList (tokenizationPipe); tokedList.addThruPipe (source); InstanceList pipedList = new InstanceList (getFeaturePipe ()); pipedList.addThruPipe (tokedList.iterator()); Iterator<Instance> it1 = tokedList.iterator (); Iterator<Instance> it2 = pipedList.iterator (); while (it1.hasNext()) { Instance toked = it1.next(); Instance piped = it2.next (); Tokenization tok = (Tokenization) toked.getData(); String name = piped.getName().toString(); Sequence input = (Sequence) piped.getData (); Sequence target = (Sequence) piped.getTarget (); Sequence output = crf.transduce (input); DocumentExtraction docseq = new DocumentExtraction (name, getTargetAlphabet (), tok, output, target, backgroundTag, filter); extraction.addDocumentExtraction (docseq); } return extraction; } public TokenizationFilter getTokenizationFilter () { return filter; } public String getBackgroundTag () { return backgroundTag; } public Pipe getTokenizationPipe () { return tokenizationPipe; } public void setTokenizationPipe (Pipe tokenizationPipe) { this.tokenizationPipe = tokenizationPipe; } public Pipe getFeaturePipe () { return featurePipe; } //xxx This method is inherent dangerous!!! 
Should check that pipe.alphabet equals crf.alphabet public void setFeaturePipe (Pipe featurePipe) { this.featurePipe = featurePipe; } public Alphabet getInputAlphabet () { return crf.getInputAlphabet (); } public LabelAlphabet getTargetAlphabet () { return (LabelAlphabet) crf.getOutputAlphabet (); } public CRF getCrf () { return crf; } /** * Transfer some Pipes from the feature pipe to the tokenization pipe. * The feature pipe must be a SerialPipes. This will destructively modify the CRF object of the extractor. * This is useful if you have a CRF hat has been trained from a single pipe, which you need to split up * int feature and tokenization pipes */ public void slicePipes (int num) { Pipe fpipe = getFeaturePipe (); if (!(fpipe instanceof SerialPipes)) throw new IllegalArgumentException ("slicePipes: FeaturePipe must be a SerialPipes."); SerialPipes sp = (SerialPipes) fpipe; ArrayList pipes = new ArrayList (); for (int i = 0; i < num; i++) { pipes.add (sp.getPipe (0)); //sp.removePipe (0); TODO Fix this } //setTokenizationPipe (sp); TODO Fix this throw new UnsupportedOperationException ("Not yet implemented..."); } // Java serialization nonsense // Serial version 0: Initial version // Serial version 1: Add featurePipe // Serial version 2: Add filter private static final int CURRENT_SERIAL_VERSION = 2; private static final long serialVersionUID = 1; private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { in.defaultReadObject (); int version = in.readInt (); if ((version == 0) || (featurePipe == null)) { featurePipe = (Pipe) crf.getInputPipe (); } if (version < 2) { filter = new BIOTokenizationFilter (); } } private void writeObject (ObjectOutputStream out) throws IOException { out.defaultWriteObject (); out.writeInt (CURRENT_SERIAL_VERSION); } public Sequence pipeInput (Object input) { InstanceList all = new InstanceList (getFeaturePipe ()); all.add (input, null, null, null); return (Sequence) all.get (0).getData(); } }
8,543
29.623656
110
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/Record.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; import gnu.trove.THashMap; import java.util.Iterator; import cc.mallet.types.Label; /** * Created: Oct 12, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: Record.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class Record { private THashMap fieldMap; private String name; public Record (String name, LabeledSpans spans) { this.name = name; fieldMap = new THashMap (); for (int i = 0; i < spans.size(); i++) { LabeledSpan span = spans.getLabeledSpan (i); if (!span.isBackground()) { Label tag = span.getLabel (); Field field = (Field) fieldMap.get (tag); if (field == null) { field = new Field (span); fieldMap.put (tag, field); } else { field.addFiller (span); } } } } public String getName () { return name; } public Field getField (Label name) { return (Field) fieldMap.get (name); } public Iterator fieldsIterator () { return fieldMap.values ().iterator (); } }
1,524
24.416667
76
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/Span.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ // Rename to Segment, (then also Segmentation, SegmentSequence, SegmentList) // Alternatively, think about names: Annotation, AnnotationList, package cc.mallet.extract; /** A sub-section of a document, either linear or two-dimensional. * Spans are immutable. */ public interface Span { /** Returns a textual representatio of the span, suitable for XML output, e.g. */ String getText (); /** Returns a new span that is the intersection of this span and another. */ Span intersection (Span r); Object getDocument (); boolean intersects (Span r); boolean isSubspan (Span r); /** * Returns an integer index identifying the start of this span. * Beware that in some cases (e.g., for images), this may not * correspond directly to a sequence index. */ int getStartIdx (); /** * Returns an integer index identifying the end of this span. * Beware that in some cases (e.g., for images), this may not * correspond directly to a sequence index. */ int getEndIdx (); }
1,552
27.759259
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/ExactMatchComparator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract; /** * Created: Nov 23, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: ExactMatchComparator.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $ */ public class ExactMatchComparator implements FieldComparator { public boolean matches (String fieldVal1, String fieldVal2) { return fieldVal1.equals (fieldVal2); } }
820
34.695652
83
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/test/TestDocumentViewer.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract.test; import junit.framework.*; import java.io.IOException; import java.io.File; import cc.mallet.extract.CRFExtractor; import cc.mallet.extract.DocumentViewer; import cc.mallet.extract.Extraction; import cc.mallet.fst.CRF; import cc.mallet.fst.CRFTrainerByLabelLikelihood; import cc.mallet.fst.tests.TestCRF; import cc.mallet.fst.tests.TestMEMM; import cc.mallet.pipe.Pipe; import cc.mallet.pipe.iterator.ArrayIterator; import cc.mallet.types.InstanceList; /** * Created: Mar 30, 2005 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: TestDocumentViewer.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $ */ public class TestDocumentViewer extends TestCase { public TestDocumentViewer (String name) { super (name); } public static Test suite () { return new TestSuite (TestDocumentViewer.class); } private File outputDir = new File ("extract"); public void testSpaceViewer () throws IOException { Pipe pipe = TestMEMM.makeSpacePredictionPipe (); String[] data0 = { TestCRF.data[0] }; String[] data1 = { TestCRF.data[1] }; InstanceList training = new InstanceList (pipe); training.addThruPipe (new ArrayIterator (data0)); InstanceList testing = new InstanceList (pipe); testing.addThruPipe (new ArrayIterator (data1)); CRF crf = new CRF (pipe, null); crf.addFullyConnectedStatesForLabels (); CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood (crf); crft.trainIncremental (training); CRFExtractor extor = TestLatticeViewer.hackCrfExtor (crf); Extraction extraction = extor.extract (new ArrayIterator (data1)); if 
(!outputDir.exists ()) outputDir.mkdir (); DocumentViewer.writeExtraction (outputDir, extraction); } public static void main (String[] args) throws Throwable { TestSuite theSuite; if (args.length > 0) { theSuite = new TestSuite (); for (int i = 0; i < args.length; i++) { theSuite.addTest (new TestDocumentViewer (args[i])); } } else { theSuite = (TestSuite) suite (); } junit.textui.TestRunner.run (theSuite); } }
2,610
29.360465
81
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/test/TestDocumentExtraction.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract.test; import junit.framework.*; import java.util.regex.Pattern; import cc.mallet.extract.*; import cc.mallet.types.Label; import cc.mallet.types.LabelAlphabet; import cc.mallet.types.LabelSequence; import cc.mallet.util.CharSequenceLexer; /** * Created: Oct 12, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: TestDocumentExtraction.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $ */ public class TestDocumentExtraction extends TestCase { public TestDocumentExtraction (String name) { super (name); } public static Test suite () { return new TestSuite (TestDocumentExtraction.class); } public void testToXml () { LabelAlphabet dict = new LabelAlphabet (); String document = "the quick brown fox leapt over the lazy dog"; StringTokenization toks = new StringTokenization (document, new CharSequenceLexer ()); Label O = dict.lookupLabel ("O"); Label ANML = dict.lookupLabel ("ANIMAL"); Label VB = dict.lookupLabel ("VERB"); LabelSequence tags = new LabelSequence (new Label[] { O, ANML, ANML, ANML, VB, O, O, ANML, ANML }); DocumentExtraction extr = new DocumentExtraction ("Test", dict, toks, tags, "O"); String actualXml = extr.toXmlString(); String expectedXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" + "<doc>the <ANIMAL>quick brown fox </ANIMAL><VERB>leapt </VERB>over the <ANIMAL>lazy dog</ANIMAL></doc>\r\n"; assertEquals (expectedXml, actualXml); } public void testToXmlBIO () { LabelAlphabet dict = new LabelAlphabet (); String document = "the quick brown fox leapt over the lazy dog"; StringTokenization toks = new 
StringTokenization (document, new CharSequenceLexer ()); Label O = dict.lookupLabel ("O"); Label BANML = dict.lookupLabel ("B-ANIMAL"); Label ANML = dict.lookupLabel ("ANIMAL"); Label BVB = dict.lookupLabel ("B-VERB"); Label VB = dict.lookupLabel ("I-VERB"); LabelSequence tags = new LabelSequence (new Label[] { O, BANML, ANML, BANML, BVB, VB, O, ANML, ANML }); DocumentExtraction extr = new DocumentExtraction ("Test", dict, toks, tags, null, "O", new BIOTokenizationFilter()); String actualXml = extr.toXmlString(); String expectedXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" + "<doc>the <ANIMAL>quick brown </ANIMAL><ANIMAL>fox </ANIMAL><VERB>leapt over </VERB>the <ANIMAL>lazy dog</ANIMAL></doc>\r\n"; assertEquals (expectedXml, actualXml); } public void testNestedToXML () { LabelAlphabet dict = new LabelAlphabet (); String document = "the quick brown fox leapt over the lazy dog"; StringTokenization toks = new StringTokenization (document, new CharSequenceLexer ()); Label O = dict.lookupLabel ("O"); Label ANML = dict.lookupLabel ("ANIMAL"); Label VB = dict.lookupLabel ("VERB"); Label JJ = dict.lookupLabel ("ADJ"); Label MAMMAL = dict.lookupLabel ("MAMMAL"); LabelSequence tags = new LabelSequence (new Label[] { O, ANML, ANML, ANML, VB, O, ANML, ANML, ANML }); LabeledSpans spans = new DefaultTokenizationFilter ().constructLabeledSpans (dict, document, O, toks, tags); Span foxToken = toks.subspan (3, 4); spans.add (new LabeledSpan (foxToken, MAMMAL, false)); Span bigDogToken = toks.subspan (7, 8); spans.add (new LabeledSpan (bigDogToken, JJ, false)); DocumentExtraction extr = new DocumentExtraction ("Test", dict, toks, spans, null, "O"); String actualXml = extr.toXmlString(); String expectedXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" + "<doc>the <ANIMAL>quick brown <MAMMAL>fox </MAMMAL></ANIMAL><VERB>leapt </VERB>over <ANIMAL>the <ADJ>lazy </ADJ>dog</ANIMAL></doc>\r\n"; assertEquals (expectedXml, actualXml); } public void 
testNestedXMLTokenizationFilter () { LabelAlphabet dict = new LabelAlphabet (); String document = "the quick brown fox leapt over the lazy dog"; StringTokenization toks = new StringTokenization (document, new CharSequenceLexer ()); Label O = dict.lookupLabel ("O"); Label ANML = dict.lookupLabel ("ANIMAL"); Label ANML_MAMM = dict.lookupLabel ("ANIMAL|MAMMAL"); Label VB = dict.lookupLabel ("VERB"); Label ANML_JJ = dict.lookupLabel ("ANIMAL|ADJ"); Label ANML_JJ_MAMM = dict.lookupLabel ("ANIMAL|ADJ|MAMMAL"); LabelSequence tags = new LabelSequence (new Label[] { O, ANML, ANML, ANML_MAMM, VB, O, ANML, ANML_JJ, ANML_JJ_MAMM }); DocumentExtraction extr = new DocumentExtraction ("Test", dict, toks, tags, null, "O", new HierarchicalTokenizationFilter ()); String actualXml = extr.toXmlString(); String expectedXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" + "<doc>the <ANIMAL>quick brown <MAMMAL>fox </MAMMAL></ANIMAL><VERB>leapt </VERB>over <ANIMAL>the <ADJ>lazy <MAMMAL>dog</MAMMAL></ADJ></ANIMAL></doc>\r\n"; assertEquals (expectedXml, actualXml); // Test the ignore function extr = new DocumentExtraction ("Test", dict, toks, tags, null, "O", new HierarchicalTokenizationFilter (Pattern.compile ("AD.*"))); actualXml = extr.toXmlString(); expectedXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n" + "<doc>the <ANIMAL>quick brown <MAMMAL>fox </MAMMAL></ANIMAL><VERB>leapt </VERB>over <ANIMAL>the lazy <MAMMAL>dog</MAMMAL></ANIMAL></doc>\r\n"; assertEquals (expectedXml, actualXml); } public static void main (String[] args) throws Throwable { TestSuite theSuite; if (args.length > 0) { theSuite = new TestSuite (); for (int i = 0; i < args.length; i++) { theSuite.addTest (new TestDocumentExtraction (args[i])); } } else { theSuite = (TestSuite) suite (); } junit.textui.TestRunner.run (theSuite); } }
6,253
39.61039
165
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/test/TestLatticeViewer.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract.test; import junit.framework.Test; import junit.framework.TestCase; import junit.framework.TestSuite; import java.io.*; import cc.mallet.extract.CRFExtractor; import cc.mallet.extract.Extraction; import cc.mallet.extract.LatticeViewer; import cc.mallet.fst.CRF; import cc.mallet.fst.CRFTrainerByLabelLikelihood; import cc.mallet.fst.MEMM; import cc.mallet.fst.MEMMTrainer; import cc.mallet.fst.TokenAccuracyEvaluator; import cc.mallet.fst.TransducerEvaluator; import cc.mallet.fst.tests.TestCRF; import cc.mallet.fst.tests.TestMEMM; import cc.mallet.pipe.Pipe; import cc.mallet.pipe.SerialPipes; import cc.mallet.pipe.iterator.ArrayIterator; import cc.mallet.types.InstanceList; /** * Created: Oct 31, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: TestLatticeViewer.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $ */ public class TestLatticeViewer extends TestCase { public TestLatticeViewer (String name) { super (name); } private static File htmlFile = new File ("errors.html"); private static File latticeFile = new File ("lattice.html"); private static File htmlDir = new File ("html/"); public void testSpaceViewer () throws FileNotFoundException { Pipe pipe = TestMEMM.makeSpacePredictionPipe (); String[] data0 = { TestCRF.data[0] }; String[] data1 = { TestCRF.data[1] }; InstanceList training = new InstanceList (pipe); training.addThruPipe (new ArrayIterator (data0)); InstanceList testing = new InstanceList (pipe); testing.addThruPipe (new ArrayIterator (data1)); CRF crf = new CRF (pipe, null); 
crf.addFullyConnectedStatesForLabels (); CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood (crf); crft.trainIncremental (training); CRFExtractor extor = hackCrfExtor (crf); Extraction extration = extor.extract (new ArrayIterator (data1)); PrintStream out = new PrintStream (new FileOutputStream (htmlFile)); LatticeViewer.extraction2html (extration, extor, out); out.close(); out = new PrintStream (new FileOutputStream (latticeFile)); LatticeViewer.extraction2html (extration, extor, out, true); out.close(); } static CRFExtractor hackCrfExtor (CRF crf) { Pipe[] newPipes = new Pipe [3]; SerialPipes pipes = (SerialPipes) crf.getInputPipe (); for (int i = 0; i < 3; i++) { Pipe p0 = pipes.getPipe (0); //pipes.removePipe (0); TODO Fix me //p0.setParent (null); newPipes[i] = p0; } Pipe tokPipe = new SerialPipes (newPipes); CRFExtractor extor = new CRFExtractor (crf, (Pipe)tokPipe); return extor; } public void testDualSpaceViewer () throws IOException { Pipe pipe = TestMEMM.makeSpacePredictionPipe (); String[] data0 = { TestCRF.data[0] }; String[] data1 = TestCRF.data; InstanceList training = new InstanceList (pipe); training.addThruPipe (new ArrayIterator (data0)); InstanceList testing = new InstanceList (pipe); testing.addThruPipe (new ArrayIterator (data1)); CRF crf = new CRF (pipe, null); crf.addFullyConnectedStatesForLabels (); CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood (crf); TokenAccuracyEvaluator eval = new TokenAccuracyEvaluator (new InstanceList[] {training, testing}, new String[] {"Training", "Testing"}); for (int i = 0; i < 5; i++) { crft.train (training, 1); eval.evaluate(crft); } CRFExtractor extor = hackCrfExtor (crf); Extraction e1 = extor.extract (new ArrayIterator (data1)); Pipe pipe2 = TestMEMM.makeSpacePredictionPipe (); InstanceList training2 = new InstanceList (pipe2); training2.addThruPipe (new ArrayIterator (data0)); InstanceList testing2 = new InstanceList (pipe2); testing2.addThruPipe (new ArrayIterator 
(data1)); MEMM memm = new MEMM (pipe2, null); memm.addFullyConnectedStatesForLabels (); MEMMTrainer memmt = new MEMMTrainer (memm); TransducerEvaluator memmeval = new TokenAccuracyEvaluator (new InstanceList[] {training2, testing2}, new String[] {"Training2", "Testing2"}); memmt.train (training2, 5); memmeval.evaluate(memmt); CRFExtractor extor2 = hackCrfExtor (memm); Extraction e2 = extor2.extract (new ArrayIterator (data1)); if (!htmlDir.exists ()) htmlDir.mkdir (); LatticeViewer.viewDualResults (htmlDir, e1, extor, e2, extor2); } public static Test suite () { return new TestSuite (TestLatticeViewer.class); } public static void main (String[] args) throws Throwable { TestSuite theSuite; if (args.length > 0) { theSuite = new TestSuite (); for (int i = 0; i < args.length; i++) { theSuite.addTest (new TestLatticeViewer (args[i])); } } else { theSuite = (TestSuite) suite (); } junit.textui.TestRunner.run (theSuite); } }
5,324
31.469512
145
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/test/TestPerDocumentF1Evaluator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract.test; import junit.framework.Test; import junit.framework.TestCase; import junit.framework.TestSuite; import java.io.ByteArrayOutputStream; import java.io.PrintStream; import java.io.PrintWriter; import java.io.OutputStreamWriter; import cc.mallet.extract.*; import cc.mallet.pipe.*; import cc.mallet.pipe.iterator.ArrayIterator; import cc.mallet.types.Instance; import cc.mallet.types.InstanceList; import cc.mallet.types.LabelAlphabet; import cc.mallet.types.Sequence; import cc.mallet.util.CharSequenceLexer; /** * Created: Nov 18, 2004 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: TestPerDocumentF1Evaluator.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $ */ public class TestPerDocumentF1Evaluator extends TestCase { public TestPerDocumentF1Evaluator (String name) { super (name); } public static Test suite () { return new TestSuite (TestPerDocumentF1Evaluator.class); } private static String[] testPred = { "<eater>the big red fox</eater> did it", "it was done by <meal>the dog</meal>", "<eater>the cat</eater> ate the <meal>canary</meal>", "<meal>the hamburger</meal> was eaten by the kid", "<eater>the dog</eater> was eaten with zest", "four score and seven years <meal>ago</meal>" }; private static String[] testTrue = { "<eater>the big red fox</eater> did it", "it was done by <eater>the dog</eater>", "<eater>the cat</eater> ate <meal>the canary</meal>", "<meal>the hamburger</meal> was eaten by <eater>the kid</eater>", "<meal>the dog</meal> was eaten with zest", "four score and seven years ago" }; private Extraction 
createExtractionFrom (String[] predStrings, String[] trueStrings) { Pipe pipe = new SerialPipes (new Pipe[] { new SGML2TokenSequence (new CharSequenceLexer (CharSequenceLexer.LEX_NONWHITESPACE_CLASSES ), "O"), new Target2LabelSequence (), new PrintInputAndTarget (), }); InstanceList pred = new InstanceList (pipe); pred.addThruPipe (new ArrayIterator (predStrings)); InstanceList targets = new InstanceList (pipe); targets.addThruPipe (new ArrayIterator (trueStrings)); LabelAlphabet dict = (LabelAlphabet) pipe.getTargetAlphabet (); Extraction extraction = new Extraction (null, dict); for (int i = 0; i < pred.size(); i++) { Instance aPred = pred.get (i); Instance aTarget = targets.get (i); Tokenization input = (Tokenization) aPred.getData (); Sequence predSeq = (Sequence) aPred.getTarget (); Sequence targetSeq = (Sequence) aTarget.getTarget (); DocumentExtraction docextr = new DocumentExtraction ("TEST"+i, dict, input, predSeq, targetSeq, "O"); extraction.addDocumentExtraction (docextr); } return extraction; } private static final String testAExpected = "Testing per-document F1\nName\tP\tR\tF1\n" + "eater\t0.6667\t0.5\t0.5714\n" + "O\t0\t1\t0\n" + "meal\t0.25\t0.3333\t0.2857\n" + "OVERALL (micro-averaged) P=0.4286 R=0.4286 F1=0.4286\n" + "OVERALL (macro-averaged) F1=0.4286\n\n"; public void testPerDocEval () { Extraction extraction = createExtractionFrom (testPred, testTrue); PerDocumentF1Evaluator eval = new PerDocumentF1Evaluator (); ByteArrayOutputStream out = new ByteArrayOutputStream (); eval.setErrorOutputStream (System.out); eval.evaluate ("Testing", extraction, new PrintWriter (new OutputStreamWriter (out), true)); String output = out.toString (); assertEquals (testAExpected, output); } private static final String[] mpdPred = { "<title>Wizard of Oz</title> by <author>John Smith</author> and <author>Adam Felber</author>", "<title>Jisp Boo Fuzz by</title> the estimable <title>Rich Q. 
Doe</title> and <author>Frank Wilson</author>", "<title>Howdy Doody</title> if you think this is Mr. nonsense <author>don't you huh</author>", }; private static final String[] mpdTrue = { "<title>Wizard of Oz</title> by <author>John Smith</author> and <author>Adam Felber</author>", "<title>Jisp Boo Fuzz</title> by the estimable <author>Rich Q. Doe</author> and <author>Frank Wilson</author>", "<title>Howdy Doody</title> if <title>you</title> think this is <title>Mr.</title> <author> nonsense don't you huh</author>", }; private static final String mpdExpected = "Testing SEGMENT counts\nName\tCorrect\tPred\tTarget\n" + "title\t2\t4\t5\n" + "O\t0\t0\t0\n" + "author\t3\t4\t5\n" + "\nTesting per-field F1\n" + "Name\tP\tR\tF1\n" + "title\t0.5\t0.4\t0.4444\n" + "O\t0\t1\t0\n" + "author\t0.75\t0.6\t0.6667\n" + "OVERALL (micro-averaged) P=0.625 R=0.5 F1=0.5556\n" + "OVERALL (macro-averaged) F1=0.5556\n\n"; public void testPerFieldEval () { Extraction extraction = createExtractionFrom (mpdPred, mpdTrue); PerFieldF1Evaluator eval = new PerFieldF1Evaluator (); ByteArrayOutputStream out = new ByteArrayOutputStream (); eval.evaluate ("Testing", extraction, new PrintStream (out)); assertEquals (mpdExpected, out.toString()); } public void testToStdout () { Extraction extraction = createExtractionFrom (mpdPred, mpdTrue); PerFieldF1Evaluator eval = new PerFieldF1Evaluator (); eval.evaluate (extraction); System.out.println ("*** Please verify that something was output above."); } private static final String[] punctPred = { "<title>Wizard of Oz,</title> by <author>John Smith</author> and <author>Adam Felber</author>", "<title>Jisp Boo Fuzz by</title> the estimable <title>Rich Q. Doe</title> and <author>Frank Wilson</author>", "<title>Howdy Doody</title>!, if you think this is Mr. 
nonsense <author>don't you huh</author>", }; private static final String[] punctTrue = { "<title>Wizard of Oz</title>, by <author>John Smith</author> and <author>Adam Felber</author>", "<title>Jisp Boo Fuzz</title> by the estimable <author>Rich Q. Doe</author> and <author>Frank Wilson</author>", "<title>Howdy Doody!</title>, if <title>you</title> think this is <title>Mr.</title> <author> nonsense don't you huh</author>", }; //xxx Currently fails because grabbing the field span for Howdy Doody! grabs the </title> as // well. I think this is because getting the text subspan goes to the start of the next, // rather than the end of the last. It seems like that should be changed, but I'd need to // think about the ikmplications for Rexa before doing this. public void testPunctuationIgnoringEvaluator () { Extraction extraction = createExtractionFrom (punctPred, punctTrue); PerFieldF1Evaluator eval = new PerFieldF1Evaluator (); eval.setComparator (new PunctuationIgnoringComparator ()); eval.setErrorOutputStream (System.out); ByteArrayOutputStream out = new ByteArrayOutputStream (); eval.evaluate ("Testing", extraction, new PrintStream (out)); assertEquals (mpdExpected, out.toString()); } public void testFieldCleaning () { Extraction extraction = createExtractionFrom (punctPred, punctTrue); extraction.cleanFields (new RegexFieldCleaner ("<.*?>|,|!")); PerFieldF1Evaluator eval = new PerFieldF1Evaluator (); ByteArrayOutputStream out = new ByteArrayOutputStream (); eval.evaluate ("Testing", extraction, new PrintStream (out)); assertEquals (mpdExpected, out.toString()); } public static void main (String[] args) throws Throwable { TestSuite theSuite; if (args.length > 0) { theSuite = new TestSuite (); for (int i = 0; i < args.length; i++) { theSuite.addTest (new TestPerDocumentF1Evaluator (args[i])); } } else { theSuite = (TestSuite) suite (); } junit.textui.TestRunner.run (theSuite); } }
8,301
38.345972
131
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/extract/pipe/TokenSequence2Tokenization.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.extract.pipe; import java.util.ArrayList; import cc.mallet.extract.StringSpan; import cc.mallet.extract.StringTokenization; import cc.mallet.extract.Tokenization; import cc.mallet.pipe.Pipe; import cc.mallet.types.Instance; import cc.mallet.types.Token; import cc.mallet.types.TokenSequence; /** * Heuristically converts a simple token sequence into a Tokenization * that can be used with all the extract package goodies. * <P> * Users of this class should be warned that the tokens' features and properties * list are moved over directly, with no deep-copying. * * Created: Jan 21, 2005 * * @author <A HREF="mailto:[email protected]>[email protected]</A> * @version $Id: TokenSequence2Tokenization.java,v 1.1 2007/10/22 21:38:00 mccallum Exp $ */ public class TokenSequence2Tokenization extends Pipe { public Instance pipe (Instance carrier) { Object data = carrier.getData (); if (data instanceof Tokenization) { // we're done } else if (data instanceof TokenSequence) { StringBuffer buf = new StringBuffer (); TokenSequence ts = (TokenSequence) data; StringTokenization spans = new StringTokenization (buf); // I can use a StringBuffer as the doc! Awesome! 
for (int i = 0; i < ts.size(); i++) { Token token = ts.get(i); int start = buf.length (); buf.append (token.getText()); int end = buf.length(); StringSpan span = new StringSpan (buf, start, end); span.setFeatures (token.getFeatures ()); span.setProperties (token.getProperties ()); spans.add (span); buf.append (" "); } carrier.setData (spans); } else { throw new IllegalArgumentException ("Can't convert "+data+" to Tokenization."); } return carrier; } }
2,237
31.911765
112
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/FeatureSequenceWithBigrams.java
/* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.types; import java.io.ObjectOutputStream; import java.io.IOException; import java.io.ObjectInputStream; import cc.mallet.pipe.TokenSequenceRemoveStopwords; /** A FeatureSequence with a parallel record of bigrams, kept in a separate dictionary * @author <a href="mailto:[email protected]">Andrew McCallum</a> */ public class FeatureSequenceWithBigrams extends FeatureSequence { public final static String deletionMark = "NextTokenDeleted"; Alphabet biDictionary; int[] biFeatures; public FeatureSequenceWithBigrams (Alphabet dict, Alphabet bigramDictionary, TokenSequence ts) { super (dict, ts.size()); int len = ts.size(); this.biDictionary = bigramDictionary; this.biFeatures = new int[len]; Token t, pt = null; for (int i = 0; i < len; i++) { t = ts.get(i); super.add(t.getText()); if (pt != null && pt.getProperty(deletionMark) == null) biFeatures[i] = biDictionary == null ? 0 : biDictionary.lookupIndex(pt.getText()+"_"+t.getText(), true); else biFeatures[i] = -1; pt = t; } } public Alphabet getBiAlphabet () { return biDictionary; } public final int getBiIndexAtPosition (int pos) { return biFeatures[pos]; } public Object getObjectAtPosition (int pos) { return biFeatures[pos] == -1 ? null : (biDictionary == null ? 
null : biDictionary.lookupObject (biFeatures[pos])); } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private static final int NULL_INTEGER = -1; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeObject (biDictionary); out.writeInt (biFeatures.length); for (int i = 0; i < biFeatures.length; i++) out.writeInt (biFeatures[i]); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); biDictionary = (Alphabet) in.readObject (); int featuresLength = in.readInt(); biFeatures = new int[featuresLength]; for (int i = 0; i < featuresLength; i++) biFeatures[i] = in.readInt (); } }
2,551
30.121951
116
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/HashedSparseVector.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** Sparse, yet its (present) values can be changed. You can't, however, add values that were (zero and) missing. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.types; import java.util.HashMap; import java.util.Iterator; import java.util.Arrays; import java.util.logging.*; import java.io.*; import cc.mallet.types.Alphabet; import cc.mallet.types.FeatureSequence; import cc.mallet.types.Vector; import cc.mallet.util.MalletLogger; import cc.mallet.util.PropertyList; import gnu.trove.TIntIntHashMap; public class HashedSparseVector extends SparseVector implements Serializable { private static Logger logger = MalletLogger.getLogger(SparseVector.class.getName()); TIntIntHashMap index2location; int maxIndex; public HashedSparseVector (int[] indices, double[] values, int capacity, int size, boolean copy, boolean checkIndicesSorted, boolean removeDuplicates) { super (indices, values, capacity, size, copy, checkIndicesSorted, removeDuplicates); assert (indices != null); } /** Create an empty vector */ public HashedSparseVector () { super (new int[0], new double[0], 0, 0, false, false, false); } /** Create non-binary vector, possibly dense if "featureIndices" or possibly sparse, if not */ public HashedSparseVector (int[] featureIndices, double[] values) { super (featureIndices, values); } /** Create binary vector */ public HashedSparseVector (int[] featureIndices) { super (featureIndices); } // xxx We need to implement this in FeatureVector subclasses public ConstantMatrix cloneMatrix () { return new HashedSparseVector (indices, 
values); } public ConstantMatrix cloneMatrixZeroed () { assert (values != null); int[] newIndices = new int[indices.length]; System.arraycopy (indices, 0, newIndices, 0, indices.length); HashedSparseVector sv = new HashedSparseVector (newIndices, new double[values.length], values.length, values.length, false, false, false); // share index2location trick ala IndexedSparseVector if (index2location != null) { sv.index2location = index2location; sv.maxIndex = maxIndex; } return sv; } // Methods that change values public void indexVector () { if ((index2location == null) && (indices.length > 0)) setIndex2Location (); } private void setIndex2Location () { //System.out.println ("HashedSparseVector setIndex2Location indices.length="+indices.length+" maxindex="+indices[indices.length-1]); assert (index2location == null); assert (indices.length > 0); this.maxIndex = indices[indices.length - 1]; this.index2location = new TIntIntHashMap (numLocations ()); //index2location.setDefaultValue (-1); for (int i = 0; i < indices.length; i++) index2location.put (indices[i], i); } public final void setValue (int index, double value) { if (index2location == null) setIndex2Location (); int location = index2location.get(index); if (index2location.contains (index)) values[location] = value; else throw new IllegalArgumentException ("Trying to set value that isn't present in HashedSparseVector"); } public final void setValueAtLocation (int location, double value) { values[location] = value; } // I dislike this name, but it's consistent with DenseVector. 
-cas public void columnPlusEquals (int index, double value) { if (index2location == null) setIndex2Location (); int location = index2location.get(index); if (index2location.contains (index)) values[location] += value; else throw new IllegalArgumentException ("Trying to set value that isn't present in HashedSparseVector"); } public final double dotProduct (DenseVector v) { double ret = 0; if (values == null) for (int i = 0; i < indices.length; i++) ret += v.value(indices[i]); else for (int i = 0; i < indices.length; i++) ret += values[i] * v.value(indices[i]); return ret; } public final double dotProduct (SparseVector v) { if (indices.length == 0) return 0; if (index2location == null) setIndex2Location (); double ret = 0; int vNumLocs = v.numLocations(); if (values == null) { // this vector is binary for (int i = 0; i < vNumLocs; i++) { int index = v.indexAtLocation(i); if (index > maxIndex) break; if (index2location.contains(index)) ret += v.valueAtLocation (i); } } else { for (int i = 0; i < vNumLocs; i++) { int index = v.indexAtLocation(i); if (index > maxIndex) break; if (index2location.containsKey(index)) { ret += values[ index2location.get(index) ] * v.valueAtLocation (i); } //int location = index2location.get(index); //if (location >= 0) // ret += values[location] * v.valueAtLocation (i); } } return ret; } public final void plusEqualsSparse (SparseVector v, double factor) { if (indices.length == 0) return; if (index2location == null) setIndex2Location (); int vNumLocs = v.numLocations(); for (int i = 0; i < vNumLocs; i++) { int index = v.indexAtLocation(i); if (index > maxIndex) break; if (index2location.containsKey(index)) { values[ index2location.get(index) ] += v.valueAtLocation (i) * factor; } //int location = index2location.get(index); //if (location >= 0) // values[location] += v.valueAtLocation (i) * factor; } } public final void plusEqualsSparse (SparseVector v) { if (indices.length == 0) return; if (index2location == null) setIndex2Location (); for 
(int i = 0; i < v.numLocations(); i++) { int index = v.indexAtLocation(i); if (index > maxIndex) break; int location = index2location.get(index); if (index2location.contains (index)) values[location] += v.valueAtLocation (i); } } public final void setAll (double v) { for (int i = 0; i < values.length; i++) values[i] = v; } //Serialization private static final long serialVersionUID = 1; // Version history: // 0 == Wrote out index2location. Probably a bad idea. private static final int CURRENT_SERIAL_VERSION = 1; static final int NULL_INTEGER = -1; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeInt (maxIndex); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); maxIndex = in.readInt (); if (version == 0) { // gobble up index2location Object obj = in.readObject (); if (obj != null && !(obj instanceof TIntIntHashMap)) { throw new IOException ("Unexpected object in de-serialization: "+obj); } } } }
7,248
27.096899
134
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/StringKernel.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/** @author Aron Culotta <a href="mailto:[email protected]">[email protected]</a> */

package cc.mallet.types;

import java.util.LinkedHashMap;

import cc.mallet.util.*;

/**
   Computes a similarity metric between two strings, based on counts of
   common subsequences of characters. See Lodhi et al "String kernels
   for text classification."  Optionally caches previous kernel
   computations: the map inherited from {@link LinkedHashMap} stores
   each string's self-kernel {@code sK(s,s,n)} keyed by the string.
 */
public class StringKernel extends LinkedHashMap
{
	// all words to lowercase
	static final boolean DEFAULT_NORMALIZE_CASE = true;
	// gap penalty
	static final double DEFAULT_LAMBDA = 0.5;
	// max length of subsequences to compare
	static final int DEFAULT_LENGTH = 3;
	// true if we should cache previous kernel
	// computations. Recommended!
	static final boolean DEFAULT_CACHE = true;

	boolean normalizeCase;
	double lambda;
	int n;
	boolean cache;

	/**
		 @param norm true if we lowercase all strings
		 @param lam 0-1 penalty for gaps between matches.
		 @param length max length of subsequences to compare
		 @param cache true if we should cache previous kernel computations. recommended!
	*/
	public StringKernel (boolean norm, double lam, int length, boolean cache) {
		this.normalizeCase = norm;
		this.lambda = lam;
		this.n = length;
		this.cache = cache;
	}

	public StringKernel () {
		this (DEFAULT_NORMALIZE_CASE, DEFAULT_LAMBDA, DEFAULT_LENGTH, DEFAULT_CACHE);
	}

	public StringKernel (boolean norm, double lam, int length) {
		this (norm, lam, length, DEFAULT_CACHE);
	}

	/**
		 Computes the normalized string kernel between two strings.
		 @param s string 1
		 @param t string 2
		 @return 0-1 value, where 1 is exact match.
	*/
	public double K (String s, String t) {
		// Bug fix: honor the normalizeCase option.  Previously the flag was
		// stored by the constructor but never applied, so comparisons were
		// always case-sensitive despite the documented contract.
		if (normalizeCase) {
			s = s.toLowerCase();
			t = t.toLowerCase();
		}
		// compute self kernels if not in hashmap
		double ss, tt;
		Double sstmp = (Double) get (s);
		Double tttmp = (Double) get (t);
		if (sstmp == null) {
			ss = sK (s, s, n);
			if (cache)
				put (s, Double.valueOf (ss)); // valueOf: the Double(double) constructor is deprecated
		}
		else
			ss = sstmp.doubleValue();
		if (tttmp == null) {
			tt = sK (t, t, n);
			if (cache)
				put (t, Double.valueOf (tt));
		}
		else
			tt = tttmp.doubleValue();
		double st = sK (s, t, n);
		// normalize by the geometric mean of the self kernels
		// NOTE(review): if either string produces a zero self-kernel
		// (e.g. an empty string), this returns NaN — TODO confirm intended.
		return st / Math.sqrt (ss*tt);
	}

	/**
		 Dynamic-programming computation of the (unnormalized) subsequence
		 kernel of s and t, summed over subsequence lengths 1..n.
		 K[i] is a flattened (slen+1) x (tlen+1) table for length-i matches.
		 NOTE(review): the gap penalty field 'lambda' is never applied in
		 this recursion — TODO confirm whether that is intended.
	*/
	private double sK (String s, String t, int n) {
		double sum, r = 0.0;
		int i, j, k;
		int slen = s.length();
		int tlen = t.length();
		double [][]K = new double[n+1][(slen+1)*(tlen+1)];
		// base case: length-0 "matches" everywhere
		for (j = 0; j < (slen+1); j++)
			for (k = 0; k < (tlen+1); k++)
				K[0][k*(slen+1) + j] = 1;
		for (i = 0; i < n; i++) {
			for (j = 0; j < slen; j++) {
				sum = 0.0;
				for (k = 0; k < tlen; k++) {
					if (t.charAt(k) == s.charAt(j)) {
						sum += K[i][k*(slen+1)+j];
					}
					K[i+1][(k+1)*(slen+1)+j+1] = K[i+1][(k+1)*(slen+1)+j] + sum;
				}
			}
			// accumulate the full-table corner entry for each subsequence length
			r = r + K[i+1][tlen*(slen+1)+slen];
		}
		return r;
	}

	static CommandOption.String string1Option = new CommandOption.String
	(StringKernel.class, "string1", "FILE", true, null, "String one", null);

	static CommandOption.String string2Option = new CommandOption.String
	(StringKernel.class, "string2", "FILE", true, null, "String two", null);

	static final CommandOption.List commandOptions =
	new CommandOption.List (
		"String Kernel.",
		new CommandOption[] {
			string1Option,
			string2Option,
		});

	/** Return string kernel between two strings*/
	public static void main (String[] args) throws Exception {
		commandOptions.process (args);
		StringKernel sk = new StringKernel ();
		System.err.println ("String Kernel for " + string1Option.value + " and " + string2Option.value + " is " +
												sk.K (string1Option.value, string2Option.value));
	}
}
4,039
27.055556
161
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/FeatureInducer.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */

package cc.mallet.types;

import java.util.logging.*;
import java.util.BitSet;
import java.io.*;

import cc.mallet.util.MalletLogger;

/* Where will the new features get extracted in the Pipe? */

/**
 * Induces new features for an {@link InstanceList} by ranking candidate
 * feature conjunctions (and currently-masked atomic features) with a
 * {@link RankedFeatureVector.Factory} and keeping the top-gain candidates.
 * The selected conjunctions are remembered in {@link #fcl} so they can
 * later be applied to other (e.g. test) instance lists via
 * {@link #induceFeaturesFor}.
 */
public class FeatureInducer implements Serializable
{
	private static Logger logger = MalletLogger.getLogger(FeatureInducer.class.getName());

	static boolean addMaskedFeatures = false;
	// Below this many instances, induction is skipped entirely.
	static int minTrainingListSize = 20;

	// Only one of the following two will be non-null
	RankedFeatureVector.Factory ranker;
	RankedFeatureVector.PerLabelFactory perLabelRanker;

	// Candidate-pruning beam widths; see the constructor for how the
	// min/max of the two are used.
	int beam1 = 300;
	int beam2 = 1000;
	// The conjunctions selected by induction; applied in induceFeaturesFor().
	FeatureConjunction.List fcl;

	// xxx Could perhaps build a hash value for each feature that measures its distribution
	// over instances, and avoid conjunctions of features that are *exact* duplicates
	// with this hash value.

	/**
	 * Runs feature induction immediately on {@code ilist}, selecting up to
	 * {@code numNewFeatures} new conjunction/atomic features.
	 *
	 * @param ranker scores features by gain (e.g. gradient gain)
	 * @param ilist training instances whose data alphabet receives the new features
	 * @param numNewFeatures maximum number of features to add
	 * @param beam1 one of two beam widths limiting which top-ranked features
	 *        may participate in conjunctions
	 * @param beam2 the other beam width; min(beam1,beam2) restricts the first
	 *        conjunct, max(beam1,beam2) the second
	 */
	public FeatureInducer (RankedFeatureVector.Factory ranker,
												 InstanceList ilist,
												 int numNewFeatures, int beam1, int beam2)
	{
		this.fcl = new FeatureConjunction.List ();
		this.beam1 = beam1;
		this.beam2 = beam2;
		if (ilist.size() < minTrainingListSize) {
			// NOTE(review): message says "features" but the check is on the
			// number of instances in the list.
			logger.info ("FeatureInducer not inducing from less than "+minTrainingListSize+" features.");
			return;
		}
		// Work on a clone of the data alphabet so candidate conjunctions do
		// not pollute the real vocabulary until they are accepted below.
		Alphabet tmpDV = (Alphabet) ilist.getDataAlphabet().clone();
		FeatureSelection featuresSelected = ilist.getFeatureSelection();
		InstanceList tmpilist = new InstanceList (tmpDV, ilist.getTargetAlphabet());
		// Rank the existing features to decide which may form conjunctions.
		RankedFeatureVector gg = ranker.newRankedFeatureVector (ilist);
		logger.info ("Rank values before this round of conjunction-building");
		int n = Math.min (200, gg.numLocations());
		for (int i = 0; i < n; i++)
			logger.info ("Rank="+i+' '+Double.toString(gg.getValueAtRank(i)) + ' ' + gg.getObjectAtRank(i).toString());
		//for (int i = gg.numLocations()-200; i < gg.numLocations(); i++)
		//System.out.println ("i="+i+' '+Double.toString(gg.getValueAtRank(i)) + ' ' + gg.getObjectAtRank(i).toString());
		//System.out.println ("");
		// fsMin/fsMax are the beams: features allowed as first/second conjunct.
		FeatureSelection fsMin = new FeatureSelection (tmpDV);
		FeatureSelection fsMax = new FeatureSelection (tmpDV);
		int minBeam = Math.min (beam1, beam2);
		int maxBeam = Math.max (beam1, beam2);
		logger.info ("Using minBeam="+minBeam+" maxBeam="+maxBeam);
		int max = maxBeam < gg.numLocations() ? maxBeam : gg.numLocations();
		for (int b = 0; b < max; b++) {
			// Zero gain means this and all lower-ranked features are useless.
			if (gg.getValueAtRank(b) == 0)
				break;
			int index = gg.getIndexAtRank(b);
			fsMax.add (index);
			if (b < minBeam)
				fsMin.add (index);
		}
		// Prevent it from searching through all of gg2
		//double minGain = gg.getValueAtRank(maxBeam*2);
		// No, there are so many "duplicate" features, that it ends up only adding a few each round.
		//double minGain = Double.NEGATIVE_INFINITY;
		// Just use a constant; anything less than this must not have enough support in the data.
		//double minGain = 5;
		double minGain = 0;
		//// xxx Temporarily remove all feature conjunction pruning
		//System.out.println ("FeatureInducer: Temporarily not pruning any feature conjunctions from consideration.");
		//fsMin = fsMax = null; minGain = Double.NEGATIVE_INFINITY;
		//int[] conjunctions = new int[beam];
		//for (int b = 0; b < beam; b++)
		//conjunctions[b] = gg.getIndexAtRank(b);
		gg = null; // Allow memory to be freed
		// Build a temporary instance list whose vectors include the candidate
		// conjunctions (restricted by the fsMin/fsMax beams).
		for (int i = 0; i < ilist.size(); i++) {
			Instance inst = ilist.get(i);
			FeatureVector fv = (FeatureVector) inst.getData ();
			tmpilist.add (new Instance (new FeatureVector (fv, tmpDV, fsMin, fsMax),
																	inst.getTarget(), inst.getName(), inst.getSource()),
										ilist.getInstanceWeight(i));
		}
		logger.info ("Calculating gradient gain of conjunctions, vocab size = "+tmpDV.size());
		// Re-rank, now including the candidate conjunctions.
		RankedFeatureVector gg2 = ranker.newRankedFeatureVector (tmpilist);
		for (int i = 0; i < 200 && i < gg2.numLocations(); i++)
			logger.info ("Conjunction Rank="+i+' '+Double.toString(gg2.getValueAtRank(i)) + ' ' + gg2.getObjectAtRank(i).toString());
		int numFeaturesAdded = 0;
		Alphabet origV = ilist.getDataAlphabet();
		int origVSize = origV.size();
		// Walk candidates in rank order, accepting until numNewFeatures reached.
		nextfeatures:
		for (int i = 0; i < gg2.numLocations(); i++) {
			double gain = gg2.getValueAtRank (i);
			if (gain < minGain) {
				// There are no more new features we could add, because they all have no more gain
				// than the features we started with
				logger.info ("Stopping feature induction: gain["+i+"]="+gain+", minGain="+minGain);
				break;
			}
			if (gg2.getIndexAtRank(i) >= origVSize) {
				// Index beyond the original vocabulary => this is a new conjunction.
				// First disjunct above so that we also add singleton features that are currently masked out
				// xxx If addMaskedFeatures == true, we should still check the mask, so we don't
				// "add" and print features that are already unmasked
				String s = (String) gg2.getObjectAtRank(i);
				int[] featureIndices = FeatureConjunction.getFeatureIndices(origV, s);
				// Make sure that the new conjunction doesn't contain duplicate features
				if (FeatureConjunction.isValidConjunction (featureIndices)
						// Don't add features with exactly the same gain value: they are probably an
						// "exactly overlapping duplicate"
						// xxx Note that this might actually increase over-fitting!
						&& (i == 0 || gg2.getValueAtRank(i-1) != gg2.getValueAtRank(i)) ) {
					double newFeatureValue = gg2.getValueAtRank(i);
					// Don't add new conjunctions that have no more gain than any of their constituents
					for (int j = 0; j < featureIndices.length; j++)
						if (gg2.value (featureIndices[j]) >= newFeatureValue) {
							//System.out.println ("Skipping feature that adds no gain "+newFeatureValue+' '+s);
							continue nextfeatures;
						}
					// Accept: adding the FeatureConjunction grows origV as a side effect.
					fcl.add (new FeatureConjunction (origV, featureIndices));
					int index = origV.size()-1;
					// If we have a feature mask, be sure to include this new feature
					logger.info ("Added feature c "+numFeaturesAdded+" "+newFeatureValue+ ' ' + s);
					// xxx Also print the gradient here, if the feature already exists.
					numFeaturesAdded++;
				}
			} else if (featuresSelected != null) {
				// Candidate is an existing atomic feature; add it only if it is
				// currently masked out of the feature selection.
				int index = gg2.getIndexAtRank (i);
				//System.out.println ("Atomic feature rank "+i+" at index "+index);
				if (!featuresSelected.contains (index)
						// A new atomic feature added to the FeatureSelection
						// Don't add features with exactly the same gain value: they are probably an
						// "exactly overlapping duplicate"
						// xxx Note that this might actually increase over-fitting!
						&& (i == 0 || gg2.getValueAtRank(i-1) != gg2.getValueAtRank(i))) {
					fcl.add (new FeatureConjunction (origV, new int[] {index}));
					logger.info ("Added feature a "+numFeaturesAdded+" "+gg2.getValueAtRank(i)+ ' ' + gg2.getObjectAtRank(i));
					numFeaturesAdded++;
				}
			}
			if (numFeaturesAdded >= numNewFeatures) {
				logger.info ("Stopping feature induction: numFeaturesAdded="+numFeaturesAdded);
				break;
			}
		}
		logger.info ("Finished adding features");
	}

	/** Convenience constructor: uses {@code numNewFeatures} for both beam widths. */
	public FeatureInducer (RankedFeatureVector.Factory ranker,
												 InstanceList ilist, int numNewFeatures)
	{
		//this (ilist, classifications, numNewFeatures, 200, numNewFeatures);
		//this (ilist, classifications, numNewFeatures, 200, 500);
		this (ranker, ilist, numNewFeatures, numNewFeatures, numNewFeatures);
	}

	// This must be run on test instance lists before they can be transduced, because we have to add the right
	// feature combinations!
	/**
	 * Adds this inducer's previously-selected feature conjunctions to every
	 * instance in {@code ilist}.  Instance data must be
	 * {@link AugmentableFeatureVector} or {@link FeatureVectorSequence}.
	 *
	 * @param withFeatureShrinkage must be false (not implemented)
	 * @param addPerClassFeatures must be false (not implemented)
	 * @throws IllegalArgumentException for unsupported instance data types
	 */
	public void induceFeaturesFor (InstanceList ilist,
																 boolean withFeatureShrinkage, boolean addPerClassFeatures)
	{
		assert (addPerClassFeatures == false);
		assert (withFeatureShrinkage == false);
		FeatureSelection fs = ilist.getFeatureSelection ();
		assert (ilist.getPerLabelFeatureSelection() == null);
		if (fcl.size() == 0)
			return;
		for (int i = 0; i < ilist.size(); i++) {
			//System.out.println ("Induced features for instance #"+i);
			Instance inst = ilist.get(i);
			Object data = inst.getData ();
			if (data instanceof AugmentableFeatureVector) {
				AugmentableFeatureVector afv = (AugmentableFeatureVector) data;
				fcl.addTo (afv, 1.0, fs);
			} else if (data instanceof FeatureVectorSequence) {
				FeatureVectorSequence fvs = (FeatureVectorSequence) data;
				for (int j = 0; j < fvs.size(); j++)
					fcl.addTo ((AugmentableFeatureVector) fvs.get(j), 1.0, fs);
			} else {
				throw new IllegalArgumentException ("Unsupported instance data type "+data.getClass().getName());
			}
		}
	}

	// Serialization
	// Note: only beam1, beam2 and the selected conjunctions are persisted;
	// the ranker factories are not.

	private static final long serialVersionUID = 1;
	private static final int CURRENT_SERIAL_VERSION = 0;

	private void writeObject (ObjectOutputStream out) throws IOException {
		out.writeInt (CURRENT_SERIAL_VERSION);
		out.writeInt(beam1);
		out.writeInt(beam2);
		out.writeObject(fcl);
	}

	private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
		int version = in.readInt ();
		beam1 = in.readInt();
		beam2 = in.readInt();
		fcl = (FeatureConjunction.List)in.readObject();
	}
}
9,479
40.762115
115
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Matrix2.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */

package cc.mallet.types;

import java.util.logging.*;
import java.util.Arrays;

import cc.mallet.types.Matrix;
import cc.mallet.util.MalletLogger;

/**
 * A dense 2-dimensional matrix of doubles, stored row-major in a flat array.
 */
@Deprecated // This class is very sparsely used, and I think we can get rid of it. -akm 1/2008
// TODO Remove this class
public final class Matrix2 extends DenseMatrix
{
	private static Logger logger = MalletLogger.getLogger(Matrix2.class.getName());

	// Number of rows and number of columns; values.length == nr * nc.
	int nr, nc;

	/** Wraps (does not copy) the given row-major array. */
	public Matrix2 (double[] values, int nr, int nc) {
		assert (values.length == nr * nc);
		this.values = values;
		this.nr = nr;
		this.nc = nc;
	}

	public Matrix2 (int nr, int nc) {
		this (new double[nr * nc], nr, nc);
	}

	/** Copies the given rectangular 2-D array into row-major flat storage.
			@throws IllegalArgumentException if rows have different lengths. */
	public Matrix2 (double[][] values) {
		this.nr = values.length;
		this.nc = values[0].length;
		for (int i = 1; i < nr; i++)
			if (values[i].length != nc)
				throw new IllegalArgumentException
					("Trying to initialize Matrix with array having columns to different lengths.");
		this.values = new double[nr * nc];
		for (int i = 0; i < nr; i++)
			// Bug fix: destination must be this.values (the flat double[]);
			// the parameter "values" (double[][]) shadowed the field, and
			// System.arraycopy into it failed at runtime with ArrayStoreException.
			System.arraycopy (values[i], 0, this.values, i*nc, nc);
	}

	/** Creates an nr-by-nc matrix with every entry set to value. */
	public Matrix2 (double value, int nr, int nc) {
		this.nr = nr;
		this.nc = nc;
		this.values = new double[nr * nc];
		Arrays.fill (this.values, value);
	}

	public int getNumDimensions () { return 2; }

	public int getDimensions (int[] sizes) {
		sizes[0] = nr;
		sizes[1] = nc;
		return 2;
	}

	public double value (int rowIndex, int colIndex) {
		return values[(nc * rowIndex) + colIndex];
	}

	/** Copies this matrix's values into array starting at startingArrayIndex. */
	public final void arrayCopyInto (double[] array, int startingArrayIndex) {
		System.arraycopy (values, 0, array, startingArrayIndex, values.length);
	}

	// Copy the contents of double[] array into this Matrix2, starting
	// at index i in the array, and continuing to fill all of this Matrix2.
	public final void arrayCopyFrom (double[] array, int startingArrayIndex) {
		System.arraycopy (array, startingArrayIndex, values, 0, values.length);
	}

	public void setValue (int rowIndex, int colIndex, double value) {
		values[(nc * rowIndex) + colIndex] = value;
	}

	/** True if m has the same dimensions (2-D, nr rows, nc columns) as this matrix. */
	public boolean sizeMatches (ConstantMatrix m) {
		if (m instanceof Matrix2)
			return (((Matrix2)m).nr == nr && ((Matrix2)m).nc == nc);
		// Bug fix: query the OTHER matrix's dimensions.  Previously this
		// called this.getDimensions(...), comparing the matrix with itself,
		// so the method returned true for any non-Matrix2 argument.
		int[] otherDims = new int[m.getNumDimensions()];
		int numDimensions = m.getDimensions (otherDims);
		return (numDimensions == 2 && otherDims[0] == nr && otherDims[1] == nc);
	}

	public boolean sizeMatches (Matrix2 m) {
		return (m.nr == nr && m.nc == nc);
	}

	public int getNumRows () { return nr; }

	public int getNumCols () { return nc; }

	/** Returns a new matrix that is the transpose of this one. */
	public Matrix2 transpose () {
		Matrix2 ret = new Matrix2 (nc, nr);
		for (int i = 0; i < nr; i++)
			for (int j = 0; j < nc; j++)
				ret.values[j*nr+i] = values[i*nc+j];
		return ret;
	}

	// The Matrix interface

	public final double value (int[] indices) {
		assert (indices.length == 2);
		return values[indices[0]*nc+indices[1]];
	}

	public final void setValue (int[] indices, double val) {
		assert (indices.length == 2);
		values[indices[0]*nc+indices[1]] = val;
	}

	// Access using a single index

	/** Maps 2-D indices {row, col} to the row-major flat index.
			Bug fix: previously returned indices[row*nc+col] — an out-of-range
			read INTO the indices array — instead of the computed flat index
			(compare value(int[]) and setValue(int[]) above). */
	public final int singleIndex (int[] indices) {
		assert (indices.length == 2);
		return indices[0]*nc+indices[1];
	}

	public final void singleToIndices (int i, int[] indices) {
		assert (indices.length == 2);
		assert (i < nc * nr);
		indices[0] = i/nc;
		indices[1] = i%nc;
	}

	public final double singleValue (int i) { return values[i]; }

	public final void setSingle (int i, double value) { values[i] = value; }

	public final int singleSize () { return nc * nr; }

	public final ConstantMatrix cloneMatrix () { return cloneMatrix2 (); }

	/** Returns a deep copy of this matrix (values array is copied). */
	public final Matrix2 cloneMatrix2 () {
		Matrix2 ret = new Matrix2 (nr, nc);
		System.arraycopy (values, 0, ret.values, 0, values.length);
		return ret;
	}

	public final void setAll (double v) {
		for (int i = 0; i < values.length; i++)
			values[i] = v;
	}

	/** If "ifSelected" is false, it reverses the selection.  If
			"fselection" is null, this implies that all features are selected;
			all values will be changed unless "ifSelected" is false. */
	public final void setAll (double v, FeatureSelection fselection, boolean ifSelected) {
		if (fselection == null) {
			if (ifSelected == true) {
				logger.info ("Matrix2.setAll using FeatureSelection==null");
				setAll (v);
			}
		} else {
			logger.info ("Matrix2.setAll using FeatureSelection");
			for (int i = 0; i < values.length; i++)
				if (fselection.contains(i) ^ !ifSelected)
					values[i] = v;
		}
	}

	/** If "ifSelected" is false, it reverses the selection.  If
			"fselection" is null, this implies that all features are selected;
			all values in the row will be changed unless "ifSelected" is false. */
	public final void rowSetAll (int ri, double v, FeatureSelection fselection, boolean ifSelected) {
		assert (ri < nr);
		if (fselection == null) {
			if (ifSelected == true) {
				for (int ci = 0; ci < nc; ci++)
					values[ri*nc+ci] = v;
			}
		} else {
			// xxx Temporary check for full selection
			//assert (fselection.nextDeselectedIndex (0) == nc);
			for (int ci = 0; ci < nc; ci++)
				if (fselection.contains(ci) ^ !ifSelected)
					values[ri*nc+ci] = v;
		}
	}

	public final void plusEquals (int ri, int ci, double value) {
		assert (ri < nr);
		assert (ci < nc);
		values[ri*nc+ci] += value;
	}

	/** Adds factor * v to row ri, at the columns where v has locations. */
	public final void rowPlusEquals (int ri, Vector v, double factor) {
		assert (ri < nr);
		for (int vli = 0; vli < v.numLocations(); vli++) {
			//System.out.println ("Matrix2 values.length="+values.length+" index="+(ri*nc+v.indexAtLocation(vli))+" ri="+ri+" nc="+nc+" v.indexAtLocation("+vli+")="+v.indexAtLocation(vli));
			values[ri*nc+v.indexAtLocation(vli)] += v.valueAtLocation(vli) * factor;
		}
	}

	//added by Fuchun
	/** Adds the scalar v * factor to every entry in row ri. */
	public final void rowPlusEquals (int ri, double v, double factor) {
		assert (ri < nr);
		for (int vli = 0; vli < nc; vli++) {
			values[ri*nc+vli] += v * factor;
		}
	}

	/** Adds factor * v to column ci, at the rows where v has locations. */
	public final void columnPlusEquals (int ci, Vector v, double factor) {
		assert (ci < nc);
		for (int vli = 0; vli < v.numLocations(); vli++)
			values[v.indexAtLocation(vli)*nc+ci] += v.valueAtLocation(vli) * factor;
	}

	//added by Fuchun
	/** Adds the scalar v * factor to every entry in column ci. */
	public final void columnPlusEquals (int ci, double v, double factor) {
		assert (ci < nc);
		for (int vli = 0; vli < nr; vli++)
			values[vli*nc+ci] += v* factor;
	}

	public final double rowDotProduct (int ri, Vector v) {
		double ret = 0;
		for (int cil = 0; cil < v.numLocations(); cil++) {
			int ci = v.indexAtLocation (cil);
			// Just skip it if ci is beyond the boundaries of this matrix;
			// everything outside is assumed to have zero value.
			if (ci < nc)
				ret += values[ri*nc+ci] * v.valueAtLocation(cil);
		}
		return ret;
	}

	/** Skip all column indices higher than "maxCi".  This lets you store
			non-vocabulary based parameters in the high column indices, without
			fearing that they may later be included by accident if the dictionary
			grows.  You may pass null for selection. */
	public final double rowDotProduct (int ri, Vector v, int maxCi, FeatureSelection selection) {
		double ret = 0;
		if (selection != null) {
			for (int cil = 0; cil < v.numLocations(); cil++) {
				int ci = v.indexAtLocation (cil);
				if (selection.contains(ci) && ci < nc && ci <= maxCi)
					ret += values[ri*nc+ci] * v.valueAtLocation(cil);
			}
		} else {
			for (int cil = 0; cil < v.numLocations(); cil++) {
				int ci = v.indexAtLocation (cil);
				if (ci < nc && ci <= maxCi)
					ret += values[ri*nc+ci] * v.valueAtLocation(cil);
			}
		}
		return ret;
	}

	public final double twoNormSquared () {
		double ret = 0;
		for (int i = 0; i < values.length; i++)
			ret += values[i] * values[i];
		return ret;
	}

	public void print () {
		for (int i = 0; i < nr; i++) {
			for (int j = 0; j < nc; j++)
				System.out.print (" " + values[i*nc+j]);
			System.out.println ("");
		}
	}

	public String toString () {
		StringBuffer sb = new StringBuffer();
		for (int i = 0; i < nr; i++) {
			for (int j = 0; j < nc; j++)
				sb.append (" " + values[i*nc+j]);
			sb.append ("\n");
		}
		return sb.toString();
	}
}
8,533
28.027211
180
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/SparseMatrixn.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.types;

import java.io.Serializable;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.Arrays;

import cc.mallet.util.ArrayUtils; // Generated package name

/**
 * Implementation of Matrix that allows an arbitrary number of dimensions.
 * Entries are stored in a {@link SparseVector} keyed by "single index" --
 * the row-major flattening of the multidimensional indices, as computed by
 * {@link Matrixn#singleIndex}.
 *
 * Created: Tue Sep 16 14:52:37 2003
 *
 * @author <a href="mailto:[email protected]">Charles Sutton</a>
 * @version $Id: SparseMatrixn.java,v 1.1 2007/10/22 21:37:39 mccallum Exp $
 */
public class SparseMatrixn implements Matrix, Cloneable, Serializable {

  private SparseVector values;   // flat storage, indexed by single index
  private int numDimensions;     // rank of the matrix
  private int[] sizes;           // extent of each dimension
  private int singleSize;        // product of all entries of sizes

  /**
   * Create a 1-d dense matrix with the given values.
   */
  public SparseMatrixn (double[] vals)
  {
    numDimensions = 1;
    sizes = new int[1];
    sizes[0] = vals.length;
    values = new SparseVector (vals);
    computeSingleSize ();
  }

  /**
   * Create a dense matrix with the given dimensions, initialized to zero.
   *
   * @param szs An array containing the maximum for each dimension.
   */
  public SparseMatrixn (int szs[])
  {
    numDimensions = szs.length;
    sizes = (int[]) szs.clone ();
    int total = 1;
    for (int j = 0; j < numDimensions; j++) {
      total *= sizes[j];
    }
    values = new SparseVector (new double[total]);
    computeSingleSize ();
  }

  /**
   * Create a dense matrix with the given dimensions and row-major values.
   *
   * @param szs An array containing the maximum for each dimension.
   * @param vals A flat array of the entries, in row-major order.
   */
  public SparseMatrixn (int[] szs, double[] vals)
  {
    numDimensions = szs.length;
    sizes = (int[]) szs.clone ();
    values = new SparseVector (vals);
    computeSingleSize ();
  }

  /**
   * Create a sparse matrix with the given dimensions and the given values.
   *
   * @param szs An array containing the maximum for each dimension.
   * @param idxs An array containing the single index for each entry of the
   *   matrix.  A single index is an integer computed from the indices of
   *   each dimension, as returned by {@link Matrixn#singleIndex}.
   * @param vals A flat array of the entries of the matrix, in row-major
   *   order.
   */
  public SparseMatrixn (int[] szs, int[] idxs, double[] vals)
  {
    numDimensions = szs.length;
    sizes = (int[]) szs.clone ();
    values = new SparseVector (idxs, vals, true, true);
    computeSingleSize ();
  }

  // Caches the total number of cells (product of all dimension sizes).
  // NOTE: renamed from the original typo "computeSingleSIze"; the method is
  // private, so no external callers are affected.
  private void computeSingleSize ()
  {
    int product = 1;
    for (int i = 0; i < sizes.length; i++) {
      product *= sizes[i];
    }
    singleSize = product;
  }

  public int getNumDimensions ()
  {
    return numDimensions;
  }

  /**
   * Copies this matrix's dimension extents into {@code szs}.
   *
   * @param szs Destination array; must have length at least numDimensions.
   * @return the number of dimensions.
   */
  public int getDimensions (int[] szs)
  {
    for (int i = 0; i < numDimensions; i++) {
      szs[i] = this.sizes[i];
    }
    return numDimensions;
  }

  public double value (int[] indices)
  {
    return values.value (singleIndex (indices));
  }

  public void setValue (int[] indices, double value)
  {
    values.setValue (singleIndex (indices), value);
  }

  /**
   * Returns an array of all the present indices.
   * Callers must not modify the return value.
   */
  public int[] getIndices ()
  {
    return values.getIndices ();
  }

  public ConstantMatrix cloneMatrix ()
  {
    /* The SparseMatrixn constructor will clone the arrays. */
    return new SparseMatrixn (sizes, values.getIndices (), values.getValues ());
  }

  public Object clone ()
  {
    return cloneMatrix ();
  }

  public int singleIndex (int[] indices)
  {
    return Matrixn.singleIndex (sizes, indices);
  }

  // This is public static so it will be useful as a general
  //  dereferencing utility for multidimensional arrays.
  public static int singleIndex (int[] szs, int[] indices)
  {
    int idx = 0;
    for (int dim = 0; dim < indices.length; dim++) {
      idx = (idx * szs[dim]) + indices[dim];
    }
    return idx;
  }

  public void singleToIndices (int single, int[] indices)
  {
    Matrixn.singleToIndices (single, indices, sizes);
  }

  public boolean equals (Object o)
  {
    if (o instanceof SparseMatrixn) {
      /* This could be extended to work for all Matrixes. */
      SparseMatrixn m2 = (SparseMatrixn) o;
      // BUG FIX: the original compared the int[] sizes with Object.equals,
      // which is reference equality for arrays and therefore almost always
      // false.  Use Arrays.equals for element-wise comparison (consistent
      // with the size check already done in plusEquals).
      return (numDimensions == m2.numDimensions)
              && (Arrays.equals (sizes, m2.sizes))
              && (values.equals (m2.values));
    } else {
      return false;
    }
  }

  // Required by the equals/hashCode contract: equal matrices have equal
  // sizes arrays, so hashing on sizes alone is consistent (if weak).
  public int hashCode ()
  {
    return Arrays.hashCode (sizes);
  }

  /**
   * Returns a one-dimensional array representation of the matrix.
   * Caller must not modify the return value.
   *
   * @return An array of the values where index 0 is the major index, etc.
   */
  public double[] toArray ()
  {
    return values.getValues ();
  }

  // Methods from Matrix

  public double singleValue (int i)
  {
    return values.singleValue (i);
  }

  public int singleSize ()
  {
    return singleSize;
  }

  // Access by index into sparse array, efficient for sparse and dense matrices

  public int numLocations ()
  {
    return values.numLocations ();
  }

  public int location (int index)
  {
    return values.location (index);
  }

  public double valueAtLocation (int location)
  {
    return values.valueAtLocation (location);
  }

  public void setValueAtLocation (int location, double value)
  {
    values.setValueAtLocation (location, value);
  }

  // Returns a "singleIndex"
  public int indexAtLocation (int location)
  {
    return values.indexAtLocation (location);
  }

  public double dotProduct (ConstantMatrix m)
  {
    return values.dotProduct (m);
  }

  public double absNorm ()
  {
    return values.absNorm ();
  }

  public double oneNorm ()
  {
    return values.oneNorm ();
  }

  public double twoNorm ()
  {
    return values.twoNorm ();
  }

  public double infinityNorm ()
  {
    return values.infinityNorm ();
  }

  public void print ()
  {
    values.print ();
  }

  public boolean isNaN ()
  {
    return values.isNaN ();
  }

  public void setSingleValue (int i, double value)
  {
    values.setValue (i, value);
  }

  public void incrementSingleValue (int i, double delta)
  {
    double value = values.value (i);
    values.setValue (i, value + delta);
  }

  public void setAll (double v)
  {
    values.setAll (v);
  }

  public void set (ConstantMatrix m)
  {
    throw new UnsupportedOperationException ("Not yet implemented.");
  }

  public void setWithAddend (ConstantMatrix m, double addend)
  {
    throw new UnsupportedOperationException ("Not yet implemented.");
  }

  public void setWithFactor (ConstantMatrix m, double factor)
  {
    throw new UnsupportedOperationException ("Not yet implemented.");
  }

  public void plusEquals (ConstantMatrix m)
  {
    plusEquals (m, 1.0);
  }

  // sucks, but so does the visitor pattern.  not often used.
  public void plusEquals (ConstantMatrix m, double factor)
  {
    if (m instanceof SparseVector) {
      values.plusEqualsSparse ((SparseVector) m, factor);
    } else if (m instanceof SparseMatrixn) {
      SparseMatrixn smn = (SparseMatrixn) m;
      if (Arrays.equals (sizes, smn.sizes)) {
        values.plusEqualsSparse (smn.values, factor);
      } else {
        throw new UnsupportedOperationException ("sizes of " + m + " do not match " + this);
      }
    } else {
      throw new UnsupportedOperationException ("Can't add " + m + " to " + this);
    }
  }

  public void equalsPlus (double factor, ConstantMatrix m)
  {
    throw new UnsupportedOperationException ("Not yet implemented.");
  }

  public void timesEquals (double factor)
  {
    values.timesEquals (factor);
  }

  public void elementwiseTimesEquals (ConstantMatrix m)
  {
    throw new UnsupportedOperationException ("Not yet implemented.");
  }

  public void elementwiseTimesEquals (ConstantMatrix m, double factor)
  {
    throw new UnsupportedOperationException ("Not yet implemented.");
  }

  public void divideEquals (double factor)
  {
    values.timesEquals (1 / factor);
  }

  public void elementwiseDivideEquals (ConstantMatrix m)
  {
    throw new UnsupportedOperationException ("Not yet implemented.");
  }

  public void elementwiseDivideEquals (ConstantMatrix m, double factor)
  {
    throw new UnsupportedOperationException ("Not yet implemented.");
  }

  /** Scales so the entries sum to 1; returns the previous one-norm. */
  public double oneNormalize ()
  {
    double norm = values.oneNorm ();
    values.timesEquals (1 / norm);
    return norm;
  }

  /** Scales so the Euclidean norm is 1; returns the previous two-norm. */
  public double twoNormalize ()
  {
    double norm = values.twoNorm ();
    values.timesEquals (1 / norm);
    return norm;
  }

  /** Scales so the absolute values sum to 1; returns the previous abs-norm. */
  public double absNormalize ()
  {
    double norm = values.absNorm ();
    values.timesEquals (1 / norm);
    return norm;
  }

  /** Scales so the max absolute value is 1; returns the previous inf-norm. */
  public double infinityNormalize ()
  {
    double norm = values.infinityNorm ();
    values.timesEquals (1 / norm);
    return norm;
  }

  // Serialization garbage

  private static final long serialVersionUID = 1;
  private static final int CURRENT_SERIAL_VERSION = 1;

  private void writeObject (ObjectOutputStream out) throws IOException
  {
    out.defaultWriteObject ();
    out.writeInt (CURRENT_SERIAL_VERSION);
  }

  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
  {
    in.defaultReadObject ();
    int version = in.readInt ();  // reserved for future format changes
  }
}
9,347
22.907928
92
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/StringEditFeatureVectorSequence.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/**
   @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/

package cc.mallet.types;

import java.io.*;
import java.util.regex.*;
import java.util.HashMap;
import gnu.trove.TObjectIntHashMap;
import java.util.Set;
import java.util.Iterator;

// xxx A not very space-efficient version.  I'll compress it later.

/**
 * A FeatureVectorSequence that additionally carries a pair of strings being
 * edit-aligned, plus per-string indexing structures: the delimiter-separated
 * "blocks" of each string, hash sets of present blocks, and a map from
 * character position to block number.  Positions are 1-based with sentinel
 * slots at each end (lengths are {@code s.length() + 2}).
 */
public class StringEditFeatureVectorSequence extends FeatureVectorSequence implements Serializable
{
	private int string1Length, string2Length;   // padded lengths: s.length() + 2
	private String string1, string2;
	private String[] string1Blocks, string2Blocks;          // delimiter-split blocks
	private TObjectIntHashMap string1Present, string2Present; // block -> 1 (presence sets)
	private TObjectIntHashMap lexicon;                       // optional external word list
	private int[] block1Indices, block2Indices; // position -> block number, or -1
	private char delim = ':';
	private static final char defaultDelimiter = ':';

	public StringEditFeatureVectorSequence (FeatureVector[] featureVectors, String s1, String s2)
	{
		this (featureVectors, s1, s2, defaultDelimiter);
	}

	public StringEditFeatureVectorSequence (FeatureVector[] featureVectors, String s1, String s2, char delimiter)
	{
		this (featureVectors, s1, s2, delimiter, null);
	}

	public StringEditFeatureVectorSequence (FeatureVector[] featureVectors, String s1, String s2, HashMap lexic)
	{
		this (featureVectors, s1, s2, defaultDelimiter, lexic);
	}

	/**
	 * Full constructor.
	 *
	 * @param featureVectors the underlying sequence of feature vectors
	 * @param s1 first string of the pair
	 * @param s2 second string of the pair
	 * @param delimiter block-separator character within each string
	 * @param lexic optional lexicon; only its keys (Strings) are used
	 */
	public StringEditFeatureVectorSequence (FeatureVector[] featureVectors, String s1, String s2, char delimiter, HashMap lexic)
	{
		super (featureVectors);
		this.delim = delimiter;
		// Copy lexicon keys into a presence map; values are unused (always 1).
		this.lexicon = new TObjectIntHashMap ();
		if (lexic != null) {
			Set keys = lexic.keySet ();
			java.util.Iterator iter = keys.iterator ();
			while (iter.hasNext ())
				this.lexicon.put ((String) iter.next (), 1);
		}
		this.string1 = s1;
		this.string2 = s2;
		// Lengths are padded by 2 to leave sentinel positions at both ends.
		this.string1Length = s1.length () + 2;
		this.string2Length = s2.length () + 2;
		string1Blocks = string1.split ("" + delim);
		string2Blocks = string2.split ("" + delim);
		string1Present = new TObjectIntHashMap ();
		string2Present = new TObjectIntHashMap ();
		// block1Indices[i] is the block number starting at position i, or -1
		// if no block starts there.  A block starts at position 0 and just
		// after each delimiter character.
		block1Indices = new int[string1Length];
		if (string1Blocks.length > 0) {
			int whichBlock = 0;
			block1Indices[0] = whichBlock++;
			for (int i = 0; i < string1Blocks.length; i++)
				string1Present.put (string1Blocks[i], 1);
			for (int i = 1; i < string1Length - 1; i++)
				block1Indices[i] = ((string1.charAt (i - 1) == delim) ? whichBlock++ : -1);
			block1Indices[string1Length - 1] = -1;
		}
		block2Indices = new int[string2Length];
		if (string2Blocks.length > 0) {
			int whichBlock = 0;
			block2Indices[0] = whichBlock++;
			for (int i = 0; i < string2Blocks.length; i++)
				string2Present.put (string2Blocks[i], 1);
			for (int i = 1; i < string2Length - 1; i++)
				block2Indices[i] = ((string2.charAt (i - 1) == delim) ? whichBlock++ : -1);
			block2Indices[string2Length - 1] = -1;
		}
	}

	public String getString1 ()
	{
		return string1;
	}

	public String getString2 ()
	{
		return string2;
	}

	/** Returns the padded length of string 1 ({@code string1.length() + 2}). */
	public int getString1Length ()
	{
		return string1Length;
	}

	/** Returns the padded length of string 2 ({@code string2.length() + 2}). */
	public int getString2Length ()
	{
		return string2Length;
	}

	// End of Block
	public int getString1EOBIndex (String delimiter)
	{
		return getString1EOBIndex (delimiter, 0);
	}

	public int getString1EOBIndex (String delimiter, int start)
	{
		return getString1IndexOf (delimiter, start);
	}

	/**
	 * Returns the block starting at position {@code idx} of string 1,
	 * or null if no block starts there or the index is out of range.
	 */
	public String getString1BlockAtIndex (int idx)
	{
		if (idx < 0 || idx >= block1Indices.length || block1Indices[idx] < 0
				|| block1Indices[idx] >= string1Blocks.length)
			return null;
		else
			return string1Blocks[block1Indices[idx]];
	}

	/**
	 * Returns the offset from {@code start} of the next occurrence of
	 * {@code str} in string 1 (or the distance to the last character when
	 * absent); returns -1 when that offset would be less than 1.
	 */
	public int getString1IndexOf (String str, int start)
	{
		int toret = string1.indexOf (str, start);
		if (toret == -1)
			toret = string1.length () - 1 - start;
		else
			toret = toret - start;
		if (toret < 1)
			return -1;
		return toret;
	}

	/** Returns whether string 1 matches the given regular expression. */
	public boolean isPresent1 (String patternStr)
	{
		Pattern p = Pattern.compile (patternStr);
		Matcher m = p.matcher (string1);
		boolean b = m.matches ();
		return b;
	}

	/** Returns whether {@code str} is one of string 1's blocks. */
	public boolean isPresentInString1 (String str)
	{
		return string1Present.containsKey (str);
	}

	/**
	 * Returns the character of string 1 at 1-based position {@code index},
	 * or the NUL character for out-of-range positions.
	 */
	public char getString1Char (int index)
	{
		index = index - 1;  // convert from 1-based sequence position
		if (index < 0 || index >= string1.length ())
			return (char) 0;
		else
			return string1.charAt (index);
	}

	public int getString2EOBIndex (String delimiter)
	{
		return getString2EOBIndex (delimiter, 0);
	}

	public int getString2EOBIndex (String delimiter, int start)
	{
		return getString2IndexOf (delimiter, start);
	}

	/**
	 * Returns the block starting at position {@code idx} of string 2,
	 * or null if no block starts there or the index is out of range.
	 */
	public String getString2BlockAtIndex (int idx)
	{
		if (idx < 0 || idx >= block2Indices.length || block2Indices[idx] < 0
				|| block2Indices[idx] >= string2Blocks.length)
			return null;
		else
			return string2Blocks[block2Indices[idx]];
	}

	/** Returns whether {@code str} is one of string 2's blocks. */
	public boolean isPresentInString2 (String str)
	{
		return string2Present.containsKey (str);
	}

	/**
	 * Returns the offset from {@code start} of the next occurrence of
	 * {@code str} in string 2 (or the distance to the last character when
	 * absent); returns -1 when that offset would be less than 1.
	 */
	public int getString2IndexOf (String str, int start)
	{
		int toret = string2.indexOf (str, start);
		if (toret == -1)
			toret = string2.length () - 1 - start;
		else
			toret = toret - start;
		if (toret < 1)
			return -1;
		return toret;
	}

	/** Returns whether string 2 matches the given regular expression. */
	public boolean isPresent2 (String patternStr)
	{
		Pattern p = Pattern.compile (patternStr);
		Matcher m = p.matcher (string2);
		boolean b = m.matches ();
		return b;
	}

	/**
	 * Returns the character of string 2 at 1-based position {@code index},
	 * or the NUL character for out-of-range positions.
	 */
	public char getString2Char (int index)
	{
		index = index - 1;  // convert from 1-based sequence position
		if (index < 0 || index >= string2.length ())
			return (char) 0;
		else
			return string2.charAt (index);
	}

	/** Returns whether {@code str} is a key of the lexicon (false if none). */
	public boolean isInLexicon (String str)
	{
		if (lexicon == null || str == null)
			return false;
		return lexicon.containsKey (str);
	}

	public String toString ()
	{
		StringBuffer sb = new StringBuffer ();
		sb.append (super.toString ());
		sb.append ('\n');
		sb.append ("String 1: " + string1Length + " String 2: " + string2Length);
		return sb.toString ();
	}

	// Serialization of Instance

	private static final long serialVersionUID = 1;
	private static final int CURRENT_SERIAL_VERSION = 0;
	private static final int NULL_INTEGER = -1;

	private void writeObject (ObjectOutputStream out) throws IOException
	{
		out.writeInt (CURRENT_SERIAL_VERSION);
		out.writeInt (string1Length);
		out.writeInt (string2Length);
		out.writeObject (string1);
		out.writeObject (string2);
		if (string1Blocks == null) {
			out.writeInt (NULL_INTEGER);
		}
		else {
			int size = string1Blocks.length;
			out.writeInt (size);
			for (int i = 0; i < size; i++) {
				out.writeObject (string1Blocks[i]);
			}
		}
		if (string2Blocks == null) {
			out.writeInt (NULL_INTEGER);
		}
		else {
			int size = string2Blocks.length;
			out.writeInt (size);
			for (int i = 0; i < size; i++) {
				out.writeObject (string2Blocks[i]);
			}
		}
		out.writeObject (string1Present);
		out.writeObject (string2Present);
		out.writeObject (lexicon);
		if (block1Indices == null) {
			out.writeInt (NULL_INTEGER);
		}
		else {
			int size = block1Indices.length;
			out.writeInt (size);
			for (int i = 0; i < size; i++) {
				out.writeInt (block1Indices[i]);
			}
		}
		if (block2Indices == null) {
			out.writeInt (NULL_INTEGER);
		}
		else {
			int size = block2Indices.length;
			out.writeInt (size);
			for (int i = 0; i < size; i++) {
				out.writeInt (block2Indices[i]);
			}
		}
		out.writeChar (delim);
	}

	private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
	{
		int version = in.readInt ();
		// BUG FIX: the original declared local variables here (e.g.
		// "int string1Length = in.readInt ();") that shadowed the fields, so
		// a deserialized instance lost string1/string2, both lengths, both
		// presence maps, and the lexicon.  Assign the fields directly.
		string1Length = in.readInt ();
		string2Length = in.readInt ();
		string1 = (String) in.readObject ();
		string2 = (String) in.readObject ();
		int size = in.readInt ();
		if (size == NULL_INTEGER) {
			string1Blocks = null;
		}
		else {
			string1Blocks = new String[size];
			for (int i = 0; i < size; i++) {
				string1Blocks[i] = (String) in.readObject ();
			}
		}
		size = in.readInt ();
		if (size == NULL_INTEGER) {
			string2Blocks = null;
		}
		else {
			string2Blocks = new String[size];
			for (int i = 0; i < size; i++) {
				string2Blocks[i] = (String) in.readObject ();
			}
		}
		string1Present = (TObjectIntHashMap) in.readObject ();
		string2Present = (TObjectIntHashMap) in.readObject ();
		lexicon = (TObjectIntHashMap) in.readObject ();
		size = in.readInt ();
		if (size == NULL_INTEGER) {
			block1Indices = null;
		}
		else {
			block1Indices = new int[size];
			for (int i = 0; i < size; i++) {
				block1Indices[i] = in.readInt ();
			}
		}
		size = in.readInt ();
		if (size == NULL_INTEGER) {
			block2Indices = null;
		}
		else {
			block2Indices = new int[size];
			for (int i = 0; i < size; i++) {
				block2Indices[i] = in.readInt ();
			}
		}
		delim = in.readChar ();
	}
}
9,380
26.672566
132
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/CrossValidationIterator.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.types;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.NoSuchElementException;

import cc.mallet.types.InstanceList;

/**
 * An iterator which splits an {@link InstanceList} into n-folds and iterates
 * over the folds for use in n-fold cross-validation. For each iteration,
 * list[0] contains a {@link InstanceList} with n-1 folds typically used for
 * training and list[1] contains an {@link InstanceList} with 1 fold typically
 * used for validation.
 *
 * This class uses {@link MultiInstanceList} to avoid creating a new
 * {@link InstanceList} each iteration.
 *
 * TODO - currently the distribution is completely random, an improvement would
 * be to provide a stratified random distribution.
 *
 * @see MultiInstanceList
 * @see InstanceList
 *
 * @author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
 */
public class CrossValidationIterator
		implements java.util.Iterator<InstanceList[]>, Serializable {

	private static final long serialVersionUID = 234516468015114991L;

	private final int nfolds;          // total number of folds
	private final InstanceList[] folds; // the random partition of the data
	private int index;                  // which fold is the next held-out fold

	/**
	 * Constructs a new n-fold cross-validation iterator
	 *
	 * @param ilist instance list to split into folds and iterate over
	 * @param nfolds number of folds to split InstanceList into
	 * @param r The source of randomness to use in shuffling.
	 */
	public CrossValidationIterator (InstanceList ilist, int nfolds, java.util.Random r) {
		// BUG FIX: the original message used this.nfolds, which is still 0
		// at this point, so a failing assertion always reported "nfolds: 0".
		// Report the offending argument instead.
		assert (nfolds > 0) : "nfolds: " + nfolds;
		this.nfolds = nfolds;
		this.index = 0;
		// Split into nfolds equal random proportions.
		double fraction = (double) 1 / nfolds;
		double[] proportions = new double[nfolds];
		for (int i = 0; i < nfolds; i++) {
			proportions[i] = fraction;
		}
		this.folds = ilist.split (r, proportions);
	}

	/**
	 * Constructs a new n-fold cross-validation iterator
	 *
	 * @param ilist instance list to split into folds and iterate over
	 * @param _nfolds number of folds to split InstanceList into
	 */
	public CrossValidationIterator (InstanceList ilist, int _nfolds) {
		this (ilist, _nfolds, new java.util.Random (System.currentTimeMillis ()));
	}

	/**
	 * Calls clear on each fold. It is recommended that this be always be called
	 * when the iterator is no longer needed so that implementations of
	 * InstanceList such as PagedInstanceList can clean up any temporary data
	 * they may have outside the JVM.
	 */
	public void clear () {
		for (InstanceList list : this.folds) {
			list.clear ();
		}
	}

	public boolean hasNext () {
		return this.index < this.nfolds;
	}

	/**
	 * Returns the next training/testing split.
	 *
	 * @return A two element array of {@link InstanceList}, where
	 *         <code>InstanceList[0]</code> contains n-1 folds for training and
	 *         <code>InstanceList[1]</code> contains 1 fold for testing.
	 */
	public InstanceList[] nextSplit () {
		if (!hasNext ()) {
			throw new NoSuchElementException ();
		}
		InstanceList[] ret = new InstanceList[2];
		if (this.folds.length == 1) {
			// Degenerate single-fold case: train and test on the same fold.
			ret[0] = this.folds[0];
			ret[1] = this.folds[0];
		} else {
			// Training set is every fold except the current held-out one.
			InstanceList[] training = new InstanceList[this.folds.length - 1];
			int j = 0;
			for (int i = 0; i < this.folds.length; i++) {
				if (i == this.index) {
					continue;
				}
				training[j++] = this.folds[i];
			}
			ret[0] = new MultiInstanceList (training);
			ret[1] = this.folds[this.index];
		}
		this.index++;
		return ret;
	}

	/**
	 * Returns the next training/testing split.
	 *
	 * @param numTrainFolds number of folds to place in the training set
	 * @return A two element array of {@link InstanceList}, where
	 *         <code>InstanceList[0]</code> contains <code>numTrainingFolds</code>
	 *         folds for training and <code>InstanceList[1]</code> contains
	 *         n - <code>numTrainingFolds</code> folds for testing.
	 */
	public InstanceList[] nextSplit (int numTrainFolds) {
		if (!hasNext ()) {
			throw new NoSuchElementException ();
		}
		List<InstanceList> trainingSet = new ArrayList<InstanceList> ();
		List<InstanceList> testSet = new ArrayList<InstanceList> ();
		// train on folds [index, index+numTrainFolds), test on rest
		for (int i = 0; i < this.folds.length; i++) {
			int foldno = (this.index + i) % this.folds.length;
			if (i < numTrainFolds) {
				trainingSet.add (this.folds[foldno]);
			} else {
				testSet.add (this.folds[foldno]);
			}
		}
		InstanceList[] ret = new InstanceList[2];
		ret[0] = new MultiInstanceList (trainingSet);
		ret[1] = new MultiInstanceList (testSet);
		this.index++;
		return ret;
	}

	/**
	 * Returns the next training/testing split.
	 *
	 * @see java.util.Iterator#next()
	 * @return A two element array of {@link InstanceList}, where
	 *         <code>InstanceList[0]</code> contains n-1 folds for training and
	 *         <code>InstanceList[1]</code> contains 1 fold for testing.
	 */
	public InstanceList[] next () {
		return nextSplit ();
	}

	/** Unsupported: folds cannot be removed. */
	public void remove () {
		throw new UnsupportedOperationException ();
	}
}
6,007
33.136364
112
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Label.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/**
   @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/

package cc.mallet.types;

import java.util.ArrayList;
import java.util.HashMap;
import java.io.*;

import cc.mallet.types.Alphabet;

/**
 * A single, immutable class label drawn from a {@link LabelAlphabet}.
 * A Label is itself a (degenerate) {@link Labeling} that places all of its
 * weight (1.0) on exactly one label: itself.  Instances are interned by
 * their alphabet; they are never constructed directly by user code.
 */
public class Label implements Labeling, Serializable, AlphabetCarrying
{
	Object entry;             // the underlying label object (e.g. a String)
	LabelAlphabet dictionary; // the alphabet that created and owns this Label
	int index;                // this Label's position in the alphabet

	protected Label ()
	{
		throw new IllegalStateException ("Label objects can only be created by their Alphabet.");
	}

	/** You should never call this directly.  New Label objects are
	    created on-demand by calling LabelAlphabet.lookupIndex(obj). */
	Label (Object entry, LabelAlphabet dict, int index)
	{
		this.entry = entry;
		this.dictionary = dict;
		assert (dict.lookupIndex (entry, false) == index);
		this.index = index;
	}

	/** Returns the alphabet that owns this Label. */
	public LabelAlphabet getLabelAlphabet ()
	{
		// The field is already a LabelAlphabet; no cast required.
		return dictionary;
	}

	/** Returns this Label's index within its alphabet. */
	public int getIndex ()
	{
		return index;
	}

	public Alphabet getAlphabet ()
	{
		return dictionary;
	}

	public Alphabet[] getAlphabets ()
	{
		return new Alphabet[] { dictionary };
	}

	/** Returns the underlying label object. */
	public Object getEntry ()
	{
		return entry;
	}

	public String toString ()
	{
		return entry.toString ();
	}

	// Comparable interface

	/** Orders Labels by their alphabet index. */
	public int compareTo (Object o)
	{
		Label other = (Label) o;
		return Integer.compare (this.index, other.index);
	}

	// Labeling interface

	/** A Label is its own best label. */
	public Label getBestLabel ()
	{
		return this;
	}

	public int getBestIndex ()
	{
		return index;
	}

	// A Label carries unit weight on itself and zero elsewhere.
	static final double weightOfLabel = 1.0;

	public double getBestValue ()
	{
		return weightOfLabel;
	}

	public double value (Label label)
	{
		assert (label.dictionary.equals (this.dictionary));
		return weightOfLabel;
	}

	public double value (int labelIndex)
	{
		if (labelIndex == this.index)
			return weightOfLabel;
		return 0;
	}

	/** Rank 0 for this Label itself; -1 for any other label. */
	public int getRank (Label label)
	{
		assert (label.dictionary.equals (this.dictionary));
		if (label == this)
			return 0;
		return -1;
	}

	public int getRank (int labelIndex)
	{
		if (labelIndex == this.index)
			return 0;
		return -1;
	}

	public Label getLabelAtRank (int rank)
	{
		assert (rank == 0);
		return this;
	}

	public double getValueAtRank (int rank)
	{
		assert (rank == 0);
		return weightOfLabel;
	}

	public void addTo (double[] weights)
	{
		weights[this.index] += weightOfLabel;
	}

	public void addTo (double[] weights, double scale)
	{
		weights[this.index] += weightOfLabel * scale;
	}

	// The number of non-zero-weight Labels in this Labeling, not total
	// number in the Alphabet
	public int numLocations ()
	{
		return 1;
	}

	public Label labelAtLocation (int loc)
	{
		assert (loc == 0);
		return this;
	}

	public double valueAtLocation (int loc)
	{
		assert (loc == 0);
		return weightOfLabel;
	}

	public int indexAtLocation (int loc)
	{
		assert (loc == 0);
		return index;
	}

	/** Converts this Label to a one-entry LabelVector with weight 1.0. */
	public LabelVector toLabelVector ()
	{
		return new LabelVector ((LabelAlphabet) dictionary,
		                        new int[] { index },
		                        new double[] { weightOfLabel });
	}

	/**
	 * Two Labels are equal iff they share the same alphabet index.
	 * Comparing against a non-Label throws IllegalArgumentException
	 * (long-standing behavior; callers may rely on it).
	 */
	public boolean equals (Object l)
	{
		if (!(l instanceof Label))
			throw new IllegalArgumentException ("Cannot compare a Label object with a " + l.getClass().getName() + " object.");
		return ((Label) l).compareTo (this) == 0;
	}

	// Serialization

	private static final long serialVersionUID = 1;
	private static final int CURRENT_SERIAL_VERSION = 0;

	private void writeObject (ObjectOutputStream out) throws IOException
	{
		out.writeInt (CURRENT_SERIAL_VERSION);
		out.writeObject (dictionary);
		out.writeInt (index);
	}

	private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
	{
		int version = in.readInt ();
		dictionary = (LabelAlphabet) in.readObject ();
		index = in.readInt ();
		// The entry is not serialized directly; re-intern it via the alphabet.
		entry = dictionary.lookupObject (index);
	}
}
4,176
20.203046
122
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/FeatureSequence.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.types;

import java.util.Arrays;
import java.io.*;

/**
 *   An implementation of {@link Sequence} that ensures that every
 *   Object in the sequence has the same class.  Feature sequences are
 *   mutable, and will expand as new objects are added.
 *
 *   Internally the sequence is a growable int[] of alphabet indices;
 *   {@code length} is the number of valid entries, which may be less than
 *   the array's capacity.
 *
 *   @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
 */
public class FeatureSequence implements Sequence, Serializable, AlphabetCarrying
{
	Alphabet dictionary;  // maps sequence objects <-> integer feature indices
	int[] features;       // alphabet indices; only [0, length) are valid
	int length;           // number of valid entries in features

	/**
	 *  Creates a FeatureSequence given all of the objects in the
	 *  sequence.
	 *
	 *  @param dict       A dictionary that maps objects in the sequence
	 *     to numeric indices.
	 *  @param features   An array where features[i] gives the index
	 *     in dict of the ith element of the sequence.
	 */
	public FeatureSequence (Alphabet dict, int[] features)
	{
		this(dict, features.length);
		for (int i = 0; i < features.length; i++)
			add(features[i]);
	}

	/**
	 *  Creates a FeatureSequence from the first {@code len} entries of
	 *  {@code features}.
	 */
	public FeatureSequence (Alphabet dict, int[] features, int len)
	{
		this(dict, len);
		for (int i = 0; i < len; i++)
			add(features[i]);
	}

	/** Creates an empty FeatureSequence with the given initial capacity
	    (a minimum capacity of 2 is always allocated). */
	public FeatureSequence (Alphabet dict, int capacity)
	{
		dictionary = dict;
		features = new int[capacity > 2 ? capacity : 2];
		length = 0;
	}

	/** Creates an empty FeatureSequence with default capacity. */
	public FeatureSequence (Alphabet dict)
	{
		this (dict, 2);
	}

	// NOTE: exposes the internal array, whose tail beyond length is unused;
	// callers must not modify it.
	public int[] getFeatures() { return features ;}

	public Alphabet getAlphabet () { return dictionary; }

	public Alphabet[] getAlphabets() { return new Alphabet[] {getAlphabet()}; }

	/** Returns true if this sequence's alphabet equals the other carrier's. */
	public boolean alphabetsMatch (AlphabetCarrying object) {
		return getAlphabet().equals (object.getAlphabet());
	}

	public final int getLength () { return length; }

	public final int size () { return length; }

	/** Returns the alphabet index of the element at position pos. */
	public final int getIndexAtPosition (int pos)
	{
		return features[pos];
	}

	/** Returns the dictionary entry of the element at position pos. */
	public Object getObjectAtPosition (int pos)
	{
		return dictionary.lookupObject (features[pos]);
	}

	// xxx This method name seems a bit ambiguous?
	public Object get (int pos)
	{
		return dictionary.lookupObject (features[pos]);
	}

	public String toString ()
	{
		StringBuffer sb = new StringBuffer ();
		for (int fsi = 0; fsi < length; fsi++) {
			Object o = dictionary.lookupObject(features[fsi]);
			sb.append (fsi);
			sb.append (": ");
			sb.append (o.toString());
			sb.append (" (");
			sb.append (features[fsi]);
			sb.append (")\n");
		}
		return sb.toString();
	}

	// Doubles the capacity of the features array when it is full.
	protected void growIfNecessary ()
	{
		if (length == features.length) {
			int[] newFeatures = new int[features.length * 2];
			System.arraycopy (features, 0, newFeatures, 0, length);
			features = newFeatures;
		}
	}

	/** Appends a feature by alphabet index.  The index must be valid for
	    the dictionary (checked only via assert). */
	public void add (int featureIndex)
	{
		growIfNecessary ();
		assert (featureIndex < dictionary.size());
		features[length++] = featureIndex;
	}

	/**
	 * Appends a feature by object, looking up (and possibly adding) its
	 * index in the dictionary.  If the dictionary's growth is stopped and
	 * the key is unknown, lookupIndex returns a negative value and the key
	 * is silently skipped -- this is deliberate, so that a frozen Pipe
	 * (e.g. from a cached classifier) can still process new data.
	 */
	public void add (Object key)
	{
		int fi = dictionary.lookupIndex (key);
		if (fi >= 0)
			add (fi);
		// [email protected]: with the exception suggested below, it would not
		// be possible to pipe data when growth of the alphabet is stopped.
		// We want to be able to do this, for example to process new data
		// using an old Pipe (e.g. from a fixed, cached classifier that we
		// want to apply to new data).
		//else
		// xxx Should we raise an exception if the appending doesn't happen?  "yes" -akm, added 1/2008
		// throw new IllegalStateException ("Object cannot be added to FeatureSequence because its Alphabet is frozen.");
	}

	/** Adds 1 to weights[f] for each occurrence of feature f in the sequence. */
	public void addFeatureWeightsTo (double[] weights)
	{
		for (int i = 0; i < length; i++)
			weights[features[i]]++;
	}

	/** Adds scale to weights[f] for each occurrence of feature f. */
	public void addFeatureWeightsTo (double[] weights, double scale)
	{
		for (int i = 0; i < length; i++)
			weights[features[i]] += scale;
	}

	/** Returns a copy of the valid portion of the index array. */
	public int[] toFeatureIndexSequence ()
	{
		int[] feats = new int[length];
		System.arraycopy (features, 0, feats, 0, length);
		return feats;
	}

	/** Returns the feature indices as a sorted copy. */
	public int[] toSortedFeatureIndexSequence ()
	{
		int[] feats = this.toFeatureIndexSequence ();
		java.util.Arrays.sort (feats);
		return feats;
	}

	/**
	 *  Remove features from the sequence that occur fewer than
	 *  <code>cutoff</code> times in the corpus, as indicated by
	 *  the provided counts. Also swap in the new, reduced alphabet.
	 *  This method alters the instance in place; it is not appropriate
	 *  if the original instance will be needed.
	 */
	public void prune (double[] counts, Alphabet newAlphabet, int cutoff)
	{
		// The goal is to replace the sequence of features in place, by
		// creating a new array and then swapping it in.

		// First: figure out how long the new array will have to be
		int newLength = 0;
		for (int i = 0; i < length; i++) {
			if (counts[features[i]] >= cutoff) {
				newLength++;
			}
		}

		// Second: allocate a new features array
		int[] newFeatures = new int[newLength];

		// Third: fill the new array, re-indexing each surviving feature
		//  through the new (smaller) alphabet
		int newIndex = 0;
		for (int i = 0; i < length; i++) {
			if (counts[features[i]] >= cutoff) {
				Object feature = dictionary.lookupObject(features[i]);
				newFeatures[newIndex] = newAlphabet.lookupIndex(feature);
				newIndex++;
			}
		}

		// Fourth: swap out the arrays
		features = newFeatures;
		length = newLength;
		dictionary = newAlphabet;
	}

	// Serialization
	// NOTE: the full capacity of the features array is written (including
	// unused zero-filled slots past length); readObject restores the same
	// capacity.  Do not change this ordering -- stored models depend on it.

	private static final long serialVersionUID = 1;
	private static final int CURRENT_SERIAL_VERSION = 0;
	private static final int NULL_INTEGER = -1;

	private void writeObject (ObjectOutputStream out) throws IOException
	{
		out.writeInt (CURRENT_SERIAL_VERSION);
		out.writeObject (dictionary);
		out.writeInt (features.length);
		for (int i = 0; i < features.length; i++)
			out.writeInt (features[i]);
		out.writeInt (length);
	}

	private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
	{
		int featuresLength;
		int version = in.readInt ();
		dictionary = (Alphabet) in.readObject ();
		featuresLength = in.readInt();
		features = new int[featuresLength];
		for (int i = 0; i < featuresLength; i++)
			features[i] = in.readInt ();
		length = in.readInt ();
	}
}
6,612
25.882114
115
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/FeatureCounter.java
package cc.mallet.types;

/**
 * Efficient, compact, incremental counting of features in an alphabet.
 * Counts are held in a Trove int-int hash map keyed by feature index, so
 * only features that have actually been counted occupy space.
 */
public class FeatureCounter {

	Alphabet alphabet;
	gnu.trove.TIntIntHashMap featureCounts;

	/** @param alphabet the alphabet whose feature indices are being counted */
	public FeatureCounter (Alphabet alphabet) {
		this.alphabet = alphabet;
		featureCounts = new gnu.trove.TIntIntHashMap();
	}

	/** Increments the count for {@code entry} by 1, adding the entry to the
	 *  alphabet if it is not already present.  Returns the new count. */
	public int increment (Object entry) {
		return featureCounts.adjustOrPutValue(alphabet.lookupIndex(entry), 1, 1);
	}

	/** Increments the count for {@code entry} by {@code incr}, adding the entry
	 *  to the alphabet if it is not already present.  Returns the new count. */
	public int increment (Object entry, int incr) {
		return featureCounts.adjustOrPutValue(alphabet.lookupIndex(entry), incr, incr);
	}

	/** Increments the count for {@code featureIndex} by 1.  Returns the new count.
	 *  @throws IllegalArgumentException if featureIndex is not a valid alphabet index */
	public int increment (int featureIndex) {
		checkFeatureIndex (featureIndex);
		return featureCounts.adjustOrPutValue(featureIndex, 1, 1);
	}

	/** Increments the count for {@code featureIndex} by {@code incr}.  Returns the new count.
	 *  @throws IllegalArgumentException if featureIndex is not a valid alphabet index */
	public int increment (int featureIndex, int incr) {
		checkFeatureIndex (featureIndex);
		return featureCounts.adjustOrPutValue(featureIndex, incr, incr);
	}

	/** Returns the count for {@code featureIndex} (0 if never incremented).
	 *  @throws IllegalArgumentException if featureIndex is not a valid alphabet index */
	public int get (int featureIndex) {
		checkFeatureIndex (featureIndex);
		return featureCounts.get (featureIndex);
	}

	/** Unlike increment(Object), this method does not add the entry to the Alphabet if it is not there already. */
	public int get (Object entry) {
		int fi = alphabet.lookupIndex(entry, false);
		if (fi == -1)
			return 0;
		else
			return featureCounts.get (fi);
	}

	/** Sets the count for {@code featureIndex}, returning the previous count.
	 *  @throws IllegalArgumentException if featureIndex is not a valid alphabet index */
	public int put (int featureIndex, int value) {
		checkFeatureIndex (featureIndex);
		return featureCounts.put (featureIndex, value);
	}

	/** Sets the count for {@code entry}, adding it to the alphabet if absent;
	 *  returns the previous count. */
	public int put (Object entry, int value) {
		return featureCounts.put (alphabet.lookupIndex(entry), value);
	}

	// Valid alphabet indices are 0 .. alphabet.size()-1.  The previous checks
	// used (featureIndex > alphabet.size()), an off-by-one that wrongly
	// accepted featureIndex == alphabet.size(); fixed to >= here.
	private void checkFeatureIndex (int featureIndex) {
		if (featureIndex < 0 || featureIndex >= alphabet.size())
			throw new IllegalArgumentException ("featureIndex "+featureIndex+" out of range");
	}

	/** Returns the counts as a FeatureVector over this counter's alphabet. */
	public FeatureVector toFeatureVector () {
		int[] indices = featureCounts.keys();
		double[] values = new double[indices.length];
		for (int i = 0; i < indices.length; i++)
			values[i] = featureCounts.get(indices[i]);
		return new FeatureVector (alphabet, indices, values);
	}

	/** Returns the counts as a RankedFeatureVector over this counter's alphabet. */
	public RankedFeatureVector toRankedFeatureVector () {
		int[] indices = featureCounts.keys();
		double[] values = new double[indices.length];
		for (int i = 0; i < indices.length; i++)
			values[i] = featureCounts.get(indices[i]);
		return new RankedFeatureVector (alphabet, indices, values);
	}
}
2,541
31.589744
112
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/SequencePair.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */

package cc.mallet.types;

import cc.mallet.fst.Segment;

/**
 * A pairing of an input sequence with its corresponding output sequence,
 * e.g. a token sequence and the label sequence aligned with it.
 *
 * @param <I> element type of the input sequence
 * @param <O> element type of the output sequence
 */
public class SequencePair<I,O>
{
	protected Sequence<I> input;
	protected Sequence<O> output;

	/** Pairs {@code input} with {@code output}. */
	public SequencePair (Sequence<I> input, Sequence<O> output)
	{
		this.input = input;
		this.output = output;
	}

	/** For subclasses that populate the fields themselves. */
	protected SequencePair ()
	{
	}

	/** @return the input sequence of this pair */
	public Sequence<I> input ()
	{
		return input;
	}

	/** @return the output sequence of this pair */
	public Sequence<O> output ()
	{
		return output;
	}

	/* This doesn't belong here. -akm 11/2007
	public Sequence[] outputNBest() {return outputNBest;}
	public double[] costNBest(){return costNBest;}
	public double[] confidenceNBest(){return confidenceNBest;}
	*/
}
1,130
25.928571
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/ROCData.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

package cc.mallet.types;

import java.io.Serializable;
import java.text.DecimalFormat;
import java.text.NumberFormat;
import java.util.Arrays;

import cc.mallet.classify.Classification;
import cc.mallet.classify.Classifier;
import cc.mallet.classify.Trial;
import cc.mallet.types.Alphabet;
import cc.mallet.types.AlphabetCarrying;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.LabelVector;

/**
 * Tracks ROC data for instances in {@link Trial} results.
 *
 * @see Trial
 * @see InstanceList
 * @see Classifier
 * @see Classification
 *
 * @author Michael Bond <a href="mailto:[email protected]">[email protected]</a>
 */
public class ROCData implements AlphabetCarrying, Serializable {
    private static final long serialVersionUID = -2060194953037720640L;

    // Indices into the per-threshold count array.
    public static final int TRUE_POSITIVE = 0;
    public static final int FALSE_POSITIVE = 1;
    public static final int FALSE_NEGATIVE = 2;
    public static final int TRUE_NEGATIVE = 3;

    private final LabelAlphabet labelAlphabet;

    /** Matrix of class, threshold, [tp, fp, fn, tn] */
    private final int[][][] counts;
    private final double[] thresholds;

    /**
     * Constructs a new object.
     *
     * Note: the supplied {@code thresholds} array is sorted in place (the
     * binary searches in the lookup methods rely on sorted order).
     *
     * @param thresholds    Array of thresholds to track counts for
     * @param labelAlphabet Label alphabet for instances in {@link Trial}
     */
    public ROCData(double[] thresholds, final LabelAlphabet labelAlphabet) {
        // ensure that thresholds are sorted
        Arrays.sort(thresholds);

        this.counts = new int[labelAlphabet.size()][thresholds.length][4];
        this.labelAlphabet = labelAlphabet;
        this.thresholds = thresholds;
    }

    /**
     * Adds classification results to the ROC data
     *
     * @param classification Classification results to add to ROC data
     */
    public void add(Classification classification) {
        int correctIndex = classification.getInstance().getLabeling().getBestIndex();
        LabelVector lv = classification.getLabelVector();
        double[] values = lv.getValues();

        if (!Alphabet.alphabetsMatch(this, lv)) {
            throw new IllegalArgumentException ("Alphabets do not match");
        }

        int numLabels = this.labelAlphabet.size();
        for (int label = 0; label < numLabels; label++) {
            double labelValue = values[label];
            int[][] thresholdCounts = this.counts[label];
            int threshold = 0;

            // add the trial to all the thresholds it would be positive for
            for (; threshold < this.thresholds.length
                    && labelValue >= this.thresholds[threshold]; threshold++) {
                if (correctIndex == label) {
                    thresholdCounts[threshold][TRUE_POSITIVE]++;
                } else {
                    thresholdCounts[threshold][FALSE_POSITIVE]++;
                }
            }

            // add the trial to the thresholds it would be negative for
            for (; threshold < this.thresholds.length; threshold++) {
                if (correctIndex == label) {
                    thresholdCounts[threshold][FALSE_NEGATIVE]++;
                } else {
                    thresholdCounts[threshold][TRUE_NEGATIVE]++;
                }
            }
        }
    }

    /**
     * Adds trial results to the ROC data
     *
     * @param trial Trial results to add to ROC data
     */
    public void add(Trial trial) {
        for (Classification classification : trial) {
            add(classification);
        }
    }

    /**
     * Adds existing ROC data to this ROC data
     *
     * @param rocData ROC data to add
     * @throws IllegalArgumentException if the label alphabets or the tracked
     *         thresholds of the two objects differ
     */
    public void add(ROCData rocData) {
        if (!Alphabet.alphabetsMatch(this, rocData)) {
            throw new IllegalArgumentException ("Alphabets do not match");
        }

        if (!Arrays.equals(this.thresholds, rocData.thresholds)) {
            throw new IllegalArgumentException ("Thresholds do not match");
        }

        // element-wise sum over the [class][threshold][statistic] matrix
        int countsLength = this.counts.length;
        for (int c = 0; c < countsLength; c++) {
            int[][] thisClassCounts = this.counts[c];
            int[][] otherClassCounts = rocData.counts[c];

            int classLength = thisClassCounts.length;
            for (int t = 0; t < classLength; t++) {
                int[] thisThrCounts = thisClassCounts[t];
                int[] otherThrCounts = otherClassCounts[t];

                int thrLength = thisThrCounts.length;
                for (int s = 0; s < thrLength; s++) {
                    thisThrCounts[s] += otherThrCounts[s];
                }
            }
        }
    }

    //@Override
    public Alphabet getAlphabet() {
        return this.labelAlphabet;
    }

    //@Override
    public Alphabet[] getAlphabets() {
        return new Alphabet[] { this.labelAlphabet };
    }

    /**
     * Gets the raw counts for a specified label.
     *
     * @param label Label to get counts for
     * @see #TRUE_POSITIVE
     * @see #FALSE_POSITIVE
     * @see #FALSE_NEGATIVE
     * @see #TRUE_NEGATIVE
     * @return Array of raw counts for specified label
     */
    public int[][] getCounts(Label label) {
        return this.counts[label.getIndex()];
    }

    /**
     * Gets the raw counts for a specified label and threshold.
     *
     * If data was not collected for the exact threshold specified, then
     * results for the highest threshold &lt;= the specified threshold will be
     * returned.
     *
     * @param label     Label to get counts for
     * @param threshold Threshold to get counts for
     * @see #TRUE_POSITIVE
     * @see #FALSE_POSITIVE
     * @see #FALSE_NEGATIVE
     * @see #TRUE_NEGATIVE
     * @return Array of raw counts for specified label and threshold
     */
    public int[] getCounts(Label label, double threshold) {
        return this.counts[label.getIndex()][thresholdIndex(threshold)];
    }

    /**
     * Gets the label alphabet
     */
    public LabelAlphabet getLabelAlphabet() {
        return this.labelAlphabet;
    }

    /**
     * Gets the precision for a specified label and threshold.
     *
     * If data was not collected for the exact threshold specified, then
     * results for the highest threshold &lt;= the specified threshold will be
     * returned.
     *
     * @param label     Label to get precision for
     * @param threshold Threshold to get precision for
     * @return Precision for specified label and threshold
     */
    public double getPrecision(Label label, double threshold) {
        int[] counts = getCounts(label, threshold);
        return (double) counts[TRUE_POSITIVE]
                / (double) (counts[TRUE_POSITIVE] + counts[FALSE_POSITIVE]);
    }

    /**
     * Gets the precision for a specified label and score.  This differs from
     * {@link #getPrecision(Label, double)} in that it is the precision for
     * only scores falling in the one score value, not for all scores above
     * the threshold.
     *
     * If data was not collected for the exact score specified, then results
     * for the highest threshold &lt;= the specified score will be returned.
     *
     * @param label Label to get precision for
     * @param score Score to get precision for
     * @return Precision for specified label and score
     */
    public double getPrecisionForScore(Label label, double score) {
        final int[][] buckets = this.counts[label.getIndex()];
        int index = thresholdIndex(score);

        final double tp;
        final double fp;
        if (index == this.thresholds.length - 1) {
            // highest bucket: all counted trials are at this score or above
            tp = buckets[index][TRUE_POSITIVE];
            fp = buckets[index][FALSE_POSITIVE];
        } else {
            // counts are cumulative in threshold order, so subtract the next
            // bucket to isolate trials that fell in exactly this score bucket
            tp = buckets[index][TRUE_POSITIVE] - buckets[index + 1][TRUE_POSITIVE];
            fp = buckets[index][FALSE_POSITIVE] - buckets[index + 1][FALSE_POSITIVE];
        }
        return tp / (tp + fp);
    }

    /**
     * Gets the estimated percentage of training events that exceed the
     * threshold.
     *
     * @param label     Label to get the percentage for
     * @param threshold Threshold to get the percentage for
     * @return Estimated percentage of events exceeding threshold
     */
    public double getPositivePercent(Label label, double threshold) {
        final int[] counts = getCounts(label, threshold);
        final int positive = counts[TRUE_POSITIVE] + counts[FALSE_POSITIVE];
        return ((double) positive
                / (double) (positive + counts[FALSE_NEGATIVE] + counts[TRUE_NEGATIVE]))
                * 100.0;
    }

    /**
     * Gets the recall rate for a specified label and threshold.
     *
     * If data was not collected for the exact threshold specified, then
     * results for the highest threshold &lt;= the specified threshold will be
     * returned.
     *
     * @param label     Label to get recall for
     * @param threshold Threshold to get recall for
     * @return Recall rate for specified label and threshold
     */
    public double getRecall(Label label, double threshold) {
        int[] counts = getCounts(label, threshold);
        return (double) counts[TRUE_POSITIVE]
                / (double) (counts[TRUE_POSITIVE] + counts[FALSE_NEGATIVE]);
    }

    /**
     * Gets the thresholds being tracked
     *
     * @return Array of thresholds
     */
    public double[] getThresholds() {
        return this.thresholds;
    }

    /**
     * Sets the raw counts for a specified label and threshold.
     *
     * If data is not collected for the exact threshold specified, then counts
     * for the highest threshold &lt;= the specified threshold will be set.
     *
     * @param label     Label to set counts for
     * @param threshold Threshold to set counts for
     * @param newCounts New count values for the label and threshold
     * @see #TRUE_POSITIVE
     * @see #FALSE_POSITIVE
     * @see #FALSE_NEGATIVE
     * @see #TRUE_NEGATIVE
     */
    public void setCounts(Label label, double threshold, int[] newCounts) {
        final int[] oldCounts = this.counts[label.getIndex()][thresholdIndex(threshold)];
        if (newCounts.length != oldCounts.length) {
            throw new IllegalArgumentException (
                    "Array of counts must contain " + oldCounts.length + " elements.");
        }
        for (int i = 0; i < oldCounts.length; i++) {
            oldCounts[i] = newCounts[i];
        }
    }

    /**
     * Maps a threshold value to the index of the highest tracked threshold
     * &lt;= that value, using binarySearch's insertion-point convention.
     * NOTE(review): as in the original code, a value below the lowest tracked
     * threshold yields index -1, which callers will surface as an
     * ArrayIndexOutOfBoundsException.
     */
    private int thresholdIndex(double threshold) {
        int index = Arrays.binarySearch(this.thresholds, threshold);
        if (index < 0) {
            index = (-index) - 2;
        }
        return index;
    }

    //@Override
    public String toString() {
        final StringBuilder buf = new StringBuilder();
        final NumberFormat format = new DecimalFormat("0.####");

        for (int i = 0; i < this.labelAlphabet.size(); i++) {
            int[][] labelData = this.counts[i];

            buf.append("ROC data for ");
            buf.append(this.labelAlphabet.lookupObject(i).toString());
            buf.append('\n');
            buf.append("THR\tTP\tFP\tFN\tTN\tPrecis\tRecall\n");

            // add one row for each threshold
            for (int t = 0; t < this.thresholds.length; t++) {
                buf.append(this.thresholds[t]);
                for (int res : labelData[t]) {
                    buf.append('\t').append(res);
                }

                double tp = labelData[t][TRUE_POSITIVE];
                double sum = tp + labelData[t][FALSE_POSITIVE];
                double precision = 0.0;
                if (sum != 0) {
                    precision = tp / sum;
                }
                sum = tp + labelData[t][FALSE_NEGATIVE];
                double recall = 0.0;
                if (sum != 0) {
                    recall = tp / sum;
                }
                buf.append('\t').append(format.format(precision));
                buf.append('\t').append(format.format(recall));
                buf.append('\n');
            }
            buf.append('\n');
        }
        return buf.toString();
    }
}
12,811
34.10137
115
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/SparseVector.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.types; import java.util.Arrays; import java.io.*; import java.lang.reflect.Method; import java.lang.reflect.InvocationTargetException; import cc.mallet.util.PropertyList; /** A vector that allocates memory only for non-zero values. When you create a SparseVector, you pass in a list of indices. These are the only elements of the vector you will be allowed to change. The rest are fixed at 0. The interface to Sparse vector uses the concept of a location, which is an integer in the range 0..numLocations which can be mapped to the index (and value) of a non zero element of the vector. A SparseVector can be sparse or dense depending on whether or not an array if indices is specified at construction time. If the SparseVector is dense, the mapping from location to index is the identity mapping. The type of the value an element in a SparseVector (or FeatureVector) can be double or binary (0.0 or 1.0), depending on whether an array of doubles is specified at contruction time. @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ public class SparseVector implements ConstantMatrix, Vector, Serializable { /** If the vector is sparse, then both indices and values are sparse. Indices into these arrays are called ``locations'' in the below. The indices[] array maps locations to indices of the (virtual) dense array that's being represented. value[] maps locations to values. 
*/ protected int[] indices; // if this is null, then the vector is dense protected double[] values; // if this is null, then the vector is binary protected boolean hasInfinite; // if true, at least one value = -Inf or +Inf /** If "indices" is null, the vector will be dense. If "values" is null, the vector will be binary. The capacity and size arguments are used by AugmentableFeatureVector. */ public SparseVector (int[] indices, double[] values, int capacity, int size, boolean copy, boolean checkIndicesSorted, boolean removeDuplicates) { // "size" was pretty much ignored??? Why? int length; length = size; if (capacity < length) capacity = length; assert (size <= length); if (!(values == null || indices == null || indices.length == values.length)) throw new IllegalArgumentException ("Attempt to create sparse non-binary SparseVector with mismatching values & indices\n" +" indices.length = "+indices.length+" values.length = "+values.length); if (copy || capacity > length) { if (indices == null) this.indices = null; else { this.indices = new int[capacity]; System.arraycopy (indices, 0, this.indices, 0, length); } if (values == null) this.values = null; else { this.values = new double[capacity]; System.arraycopy (values, 0, this.values, 0, length); } } else { this.indices = indices; this.values = values; } if (checkIndicesSorted) sortIndices (); // This also removes duplicates else if (removeDuplicates) removeDuplicates (0); } // Create a dense Vector public SparseVector (double[] values, boolean copy) { this (null, values, values.length, values.length, copy, false, false); } public SparseVector (double[] values) { this (values, true); } public SparseVector (int size, double fillValue) { this (newArrayOfValue (size, fillValue), false); } public SparseVector (int[] indices, double[] values, boolean copy, boolean checkIndicesSorted, boolean removeDuplicates) { this (indices, values, (indices != null) ? indices.length : values.length, (indices != null) ? 
indices.length : values.length, copy, checkIndicesSorted, removeDuplicates); } public SparseVector (int[] indices, double[] values) { this (indices, values, true, true, true); } public SparseVector (int[] indices, double[] values, boolean copy) { this (indices, values, copy, true, true); } public SparseVector (int[] indices, double[] values, boolean copy, boolean checkIndicesSorted) { this (indices, values, copy, checkIndicesSorted, true); } // Create a vector that is possibly binary or non-binary public SparseVector (int[] indices, boolean copy, boolean checkIndicesSorted, boolean removeDuplicates, boolean binary) { this (indices, binary ? null : newArrayOfValue(indices.length,1.0), indices.length, indices.length, copy, checkIndicesSorted, removeDuplicates); } // Create a binary vector public SparseVector (int[] indices, int capacity, int size, boolean copy, boolean checkIndicesSorted, boolean removeDuplicates) { this (indices, null, capacity, size, copy, checkIndicesSorted, removeDuplicates); } public SparseVector (int[] indices, boolean copy, boolean checkIndicesSorted) { this (indices, null, copy, checkIndicesSorted, true); } public SparseVector (int[] indices, boolean copy) { this (indices, null, copy, true, true); } public SparseVector (int[] indices) { this (indices, null, true, true, true); } /** An empty vector, with all zero values */ public SparseVector () { this (new int[0], new double[0], false, false); } public SparseVector (Alphabet dict, PropertyList pl, boolean binary, boolean growAlphabet) { if (pl == null) { // xxx Fix SparseVector so that it can properly represent a vector that has all zeros. // Does this work? indices = new int[0]; values = null; return; } PropertyList.Iterator iter; if (binary == false) { binary = true; // If all the property list features are binary, make a binary SparseVector even if the constructor argument "binary" is false. // This will significantly save space, as well as multiplication time later! 
-akm 12/2007 iter = pl.numericIterator(); while (iter.hasNext()) { iter.nextProperty(); if (iter.getNumericValue() != 1.0) { binary = false; break; } } } AugmentableFeatureVector afv = new AugmentableFeatureVector (dict, binary); //afv.print(); //System.out.println ("SparseVector binary="+binary); //pl.print(); iter = pl.numericIterator(); while (iter.hasNext()) { iter.nextProperty(); //System.out.println ("SparseVector adding "+iter.getKey()+" "+iter.getNumericValue()); int index = dict.lookupIndex(iter.getKey(), growAlphabet); if (index >=0) { afv.add (index, iter.getNumericValue()); } //System.out.println ("SparseVector afv adding "+iter.getKey()+" afv.numLocations="+afv.numLocations()); } //afv.print(); // xxx Not so efficient? SparseVector sv = afv.toSparseVector(); //System.out.println ("SparseVector sv.numLocations="+sv.numLocations()); this.indices = sv.indices; this.values = sv.values; } public SparseVector (Alphabet dict, PropertyList pl, boolean binary) { this(dict, pl, binary, true); } private static double[] newArrayOfValue (int length, double value) { double[] ret = new double[length]; Arrays.fill (ret, value); return ret; } public boolean isBinary () { return values == null; } public void makeBinary () { throw new UnsupportedOperationException ("Not yet implemented"); } public void makeNonBinary () { throw new UnsupportedOperationException ("Not yet implemented"); } /*********************************************************************** * ACCESSORS ***********************************************************************/ public int getNumDimensions () { return 1; } // xxx What do we return for the length? It could be higher than this index. public int getDimensions (int[] sizes) { if (indices == null) sizes[0] = values.length; else // xxx This is pretty unsatisfactory, since there may be zero // values above this location. sizes[0] = indices[indices.length-1]; return 1; } // necessary for the SVM implementation! 
-dmetzler // ...but be careful, this is allowed to be null! -cas public int [] getIndices() { return indices; } // necessary for the SVM implementation! -dmetzler // ...but be careful, this is allowed to be null! -cas public double [] getValues() { return values; } // xxx This is just the number of non-zero entries... // This is different behavior than Matrix2!! public int numLocations () { return (values == null ? (indices == null ? 0 : indices.length) : values.length); } public int location (int index) { if (indices == null) return index; else return Arrays.binarySearch (indices, index); } public double valueAtLocation (int location) { return values == null ? 1.0 : values[location]; } public int indexAtLocation (int location) { return indices == null ? location : indices[location]; } public double value (int[] indices) { assert (indices.length == 1); if (indices == null) return values[indices[0]]; else return values[location(indices[0])]; } public double value (int index) { if (indices == null) try { return values[index]; } catch (ArrayIndexOutOfBoundsException e) { return 0.0; } else { int loc = location(index); if (loc < 0) return 0.0; else if (values == null) return 1.0; else return values[loc]; } } public void addTo (double[] accumulator, double scale) { if (indices == null) { for (int i = 0; i < values.length; i++) accumulator[i] += values[i] * scale; } else if (values == null) { for (int i = 0; i < indices.length; i++) accumulator[indices[i]] += scale; } else { for (int i = 0; i < indices.length; i++) accumulator[indices[i]] += values[i] * scale; } } public void addTo (double[] accumulator) { addTo (accumulator, 1.0); } public int singleIndex (int[] indices) { assert (indices.length == 1); return indices[0]; } public void singleToIndices (int i, int[] indices) { indices[0] = i; } public double singleValue (int i) { return value(i); } public int singleSize () { if (indices == null) return values.length; else if (indices.length == 0) return 0; else // This is 
just the highest index that will have non-zero value. // The full size of this dimension is "unknown" return indices[indices.length-1]; } public String toString() { return this.toString(false); } public String toString(boolean onOneLine) { StringBuffer sb = new StringBuffer (); for (int i = 0; i < values.length; i++) { sb.append((indices == null ? i : indices[i])); sb.append ("="); sb.append (values[i]); if (!onOneLine) sb.append ("\n"); else sb.append (' '); } return sb.toString(); } /*********************************************************************** * CLONING ***********************************************************************/ public ConstantMatrix cloneMatrix () { if (indices == null) return new SparseVector (values); else return new SparseVector (indices, values, true, false, false); } public ConstantMatrix cloneMatrixZeroed () { if (indices == null) return new SparseVector (new double[values.length]); else { int[] newIndices = new int[indices.length]; System.arraycopy (indices, 0, newIndices, 0, indices.length); return new SparseVector (newIndices, new double[values.length], true, false, false); } } /*********************************************************************** * MUTATORS ***********************************************************************/ /** * For each index i that is present in this vector, * set this[i] += v[i]. * If v has indices that are not present in this, * these are just ignored. */ public void plusEqualsSparse (SparseVector v) { plusEqualsSparse (v, 1.0); } /** * For each index i that is present in this vector, * set this[i] += factor * v[i]. * If v has indices that are not present in this, * these are just ignored. 
*/ public void plusEqualsSparse (SparseVector v, double factor) { // Special case for dense sparse vector if (indices == null) { densePlusEqualsSparse (v, factor); return; } int loc1 = 0; int loc2 = 0; int numLocations1 = numLocations(); int numLocations2 = v.numLocations(); while ((loc1 < numLocations1) && (loc2 < numLocations2)) { int idx1 = indexAtLocation (loc1); int idx2 = v.indexAtLocation (loc2); if (idx1 == idx2) { values [loc1] += v.valueAtLocation (loc2) * factor; ++loc1; ++loc2; } else if (idx1 < idx2) { ++loc1; } else { // idx2 not present in this. Ignore. ++loc2; } } } /** * For each index i that is present in this vector, * set this[i] *= v[i]. * If v has indices that are not present in this, * these are just ignored. */ public void timesEqualsSparse (SparseVector v) { timesEqualsSparse (v, 1.0); } /** * For each index i that is present in this vector, * set this[i] *= factor * v[i]. * If v has indices that are not present in this, * these are just ignored. */ public void timesEqualsSparse (SparseVector v, double factor) { // Special case for dense sparse vector if (indices == null) { denseTimesEqualsSparse (v, factor); return; } int loc1 = 0; int loc2 = 0; while ((loc1 < numLocations()) && (loc2 < v.numLocations())) { int idx1 = indexAtLocation (loc1); int idx2 = v.indexAtLocation (loc2); if (idx1 == idx2) { values [loc1] *= v.valueAtLocation (loc2) * factor; ++loc1; ++loc2; } else if (idx1 < idx2) { ++loc1; } else { // idx2 not present in this. Ignore. ++loc2; } } } /** * For each index i that is present in this vector, * set this[i] *= factor * v[i]. 
* If v has indices that are not present in this, * these are set to zero */ public void timesEqualsSparseZero (SparseVector v, double factor) { // Special case for dense sparse vector if (indices == null) { denseTimesEqualsSparse (v, factor); return; } int loc1 = 0; int loc2 = 0; while ((loc1 < numLocations()) && (loc2 < v.numLocations())) { int idx1 = indexAtLocation (loc1); int idx2 = v.indexAtLocation (loc2); if (idx1 == idx2) { values [loc1] *= v.valueAtLocation (loc2) * factor; ++loc1; ++loc2; } else if (idx1 < idx2) { // idx1 not present in v. Zero. values[loc1] = 0; ++loc1; } else { // idx2 not present in this. Ignore ++loc2; } } } /** * Scale all elements by the same factor. */ public void timesEquals( double factor ) { for (int i = 0; i < values.length; i++) values[i] *= factor; } private void densePlusEqualsSparse (SparseVector v, double factor) { int maxloc = v.numLocations(); for (int loc = 0; loc < maxloc; loc++) { int idx = v.indexAtLocation (loc); if (idx >= values.length) break; values [idx] += v.valueAtLocation (loc) * factor; } } private void denseTimesEqualsSparse (SparseVector v, double factor) { int maxloc = v.numLocations(); for (int loc = 0; loc < maxloc; loc++) { int idx = v.indexAtLocation (loc); if (idx >= values.length) break; values [idx] *= v.valueAtLocation (loc) * factor; } } /** * Increments this[index] by value. * @throws IllegalArgumentException If index is not present. */ public void incrementValue (int index, double value) throws IllegalArgumentException { int loc = location (index); if (loc >= 0) values[loc] += value; else throw new IllegalArgumentException ("Trying to set value that isn't present in SparseVector"); } /** Sets every present index in the vector to v. */ public void setAll (double v) { for (int i = 0; i < values.length; i++) values[i] = v; } /** * Sets the value at the given index. * @throws IllegalArgumentException If index is not present. 
*/ public void setValue (int index, double value) throws IllegalArgumentException { if (indices == null) values[index] = value; else { int loc = location(index); if (loc < 0) throw new IllegalArgumentException ("Can't insert values into a sparse Vector."); else values[loc] = value; } } /** Sets the value at the given location. */ public void setValueAtLocation (int location, double value) { values[location] = value; } /** Copy values from an array into this vector. The array should have the * same size as the vector */ // yanked from DenseVector public final void arrayCopyFrom( double[] a ) { arrayCopyFrom(a,0); } /** Copy values from an array starting at a particular location into this * vector. The array must have at least as many values beyond the * starting location as there are in the vector. * * @return Next uncopied location in the array. */ public final int arrayCopyFrom( double [] a , int startingArrayLocation ) { System.arraycopy( a, startingArrayLocation, values, 0, values.length ); return startingArrayLocation + values.length; } /** * Applies the method argument to each value in a non-binary vector. * The method should both accept a Double as an argument and return a Double. * * @throws IllegalArgumentException If the method argument has an * inappropriate signature. 
* @throws UnsupportedOperationException If vector is binary * @throws IllegalAccessException If the method is inaccessible * @throws Throwable If the method throws an exception it is relayed */ public final void map (Method f) throws IllegalAccessException, Throwable { if (values == null) throw new UnsupportedOperationException ("Binary values may not be altered via map"); if (f.getParameterTypes().length!=1 || f.getParameterTypes()[0] != Double.class || f.getReturnType() != Double.class ) throw new IllegalArgumentException ("Method signature must be \"Double f (Double x)\""); try { for (int i=0 ; i<values.length ; i++) values[i] = ((Double)f.invoke (null, new Object[] {new Double(values[i])})).doubleValue (); } catch (InvocationTargetException e) { throw e.getTargetException(); } } /** Copy the contents of this vector into an array starting at a * particular location. * * @return Next available location in the array */ public final int arrayCopyInto (double[] array, int startingArrayLocation) { System.arraycopy (values, 0, array, startingArrayLocation, values.length); return startingArrayLocation + values.length; } /*********************************************************************** * VECTOR OPERATIONS ***********************************************************************/ public double dotProduct (double[] v) { double ret = 0; if (values == null) for (int i = 0; i < indices.length; i++) ret += v[indices[i]]; else for (int i = 0; i < indices.length; i++) ret += values[i] * v[indices[i]]; return ret; } public double dotProduct (ConstantMatrix m) { if (m instanceof SparseVector) return dotProduct ((SparseVector)m); else if (m instanceof DenseVector) return dotProduct ((DenseVector)m); else throw new IllegalArgumentException ("Unrecognized Matrix type "+m.getClass()); } public double dotProduct (DenseVector v) { if (v.hasInfinite || this.hasInfinite) return extendedDotProduct(v); double ret = 0; if (values == null) for (int i = 0; i < indices.length; i++) ret += 
v.value(indices[i]); else for (int i = 0; i < indices.length; i++) ret += values[i] * v.value(indices[i]); if (Double.isNaN(ret)) return extendedDotProduct(v); return ret; } // sets -Inf * 0 = 0; Inf * 0 = 0 public double extendedDotProduct (DenseVector v) { double ret = 0; if (values == null) for (int i = 0; i < indices.length; i++) ret += v.value(indices[i]); else for (int i = 0; i < indices.length; i++) { if (Double.isInfinite(values[i]) && v.value(indices[i])==0.0) { this.hasInfinite = true; continue; } else if (Double.isInfinite(v.value(indices[i])) && values[i]==0.0) { v.hasInfinite = true; continue; } ret += values[i] * v.value(indices[i]); } return ret; } public double dotProduct (SparseVector v) { if (v.hasInfinite || hasInfinite) return extendedDotProduct(v); double ret; // Decide in which direction to do the dot product. // This is a heuristic choice based on efficiency, and it could certainly // be more complicated. if (v instanceof IndexedSparseVector) { ret = v.dotProduct (this); } else if(numLocations() > v.numLocations ()) { ret = dotProductInternal (v, this); } else { ret = dotProductInternal (this, v); } if (Double.isNaN (ret)) return extendedDotProduct (v); return ret; } private double dotProductInternal (SparseVector vShort, SparseVector vLong) { double ret = 0; int numShortLocs = vShort.numLocations(); if (vShort.isBinary ()) { for(int i = 0; i < numShortLocs; i++) { ret += vLong.value (vShort.indexAtLocation(i)); } } else { for(int i = 0; i < numShortLocs; i++) { double v1 = vShort.valueAtLocation(i); double v2 = vLong.value (vShort.indexAtLocation(i)); ret += v1*v2; } } return ret; } // sets -Inf * 0 = 0, Inf * 0 = 0 public double extendedDotProduct (SparseVector v) { double ret = 0.0; SparseVector vShort = null; SparseVector vLong = null; // this ensures minimal computational effort if(numLocations() > v.numLocations ()) { vShort = v; vLong = this; } else { vShort = this; vLong = v; } for(int i = 0; i < vShort.numLocations(); i++) { double 
v1 = vShort.valueAtLocation(i); double v2 = vLong.value (vShort.indexAtLocation(i)); if (Double.isInfinite(v1) && v2==0.0) { vShort.hasInfinite = true; continue; } else if (Double.isInfinite(v2) && v1==0.0) { vLong.hasInfinite = true; continue; } ret += v1*v2; } return ret; } public SparseVector vectorAdd(SparseVector v, double scale) { if(indices != null) { // sparse SparseVector int [] ind = v.getIndices(); double [] val = v.getValues(); int [] newIndices = new int[ind.length+indices.length]; double [] newVals = new double[ind.length+indices.length]; for(int i = 0; i < indices.length; i++) { newIndices[i] = indices[i]; newVals[i] = values[i]; } for(int i = 0; i < ind.length; i++) { newIndices[i+indices.length] = ind[i]; newVals[i+indices.length] = scale*val[i]; } return new SparseVector(newIndices, newVals, true, true, false); } int [] newIndices = new int[values.length]; double [] newVals = new double[values.length]; // dense SparseVector int curPos = 0; for(int i = 0; i < values.length; i++) { double val = values[i]+scale*v.value(i); if(val != 0.0) { newIndices[curPos] = i; newVals[curPos++] = val; } } return new SparseVector(newIndices, newVals, true, true, false); } public double oneNorm () { double ret = 0; if (values == null) return indices.length; for (int i = 0; i < values.length; i++) ret += values[i]; return ret; } public double absNorm () { double ret = 0; if (values == null) return indices.length; for (int i = 0; i < values.length; i++) ret += Math.abs(values[i]); return ret; } public double twoNorm () { double ret = 0; if (values == null) return Math.sqrt (indices.length); for (int i = 0; i < values.length; i++) ret += values[i] * values[i]; return Math.sqrt (ret); } public double infinityNorm () { if (values == null) return 1.0; double max = Double.NEGATIVE_INFINITY; for (int i = 0; i < values.length; i++) if (Math.abs(values[i]) > max) max = Math.abs(values[i]); return max; } public void print() { if (values == null) { // binary sparsevector for 
(int i = 0; i < indices.length; i++) System.out.println ("SparseVector["+indices[i]+"] = 1.0"); } else { for (int i = 0; i < values.length; i++) { int idx = (indices == null) ? i : indices [i]; System.out.println ("SparseVector["+idx+"] = "+values[i]); } } } public boolean isNaN() { if (values == null) return false; return MatrixOps.isNaN(values); // for (int i = 0; i < values.length; i++) // if (Double.isNaN(values[i])) // return true; // return false; } // gsc: similar to isNaN but checks for infinite values public boolean isInfinite() { if (values == null) return false; return MatrixOps.isInfinite(values); } // gsc: returns true if any value is either NaN or infinite public boolean isNaNOrInfinite() { if (values == null) return false; return MatrixOps.isNaNOrInfinite(values); } protected void sortIndices () //public void sortIndices () //modified by Limin Yao { if (indices == null) // It's dense, and thus by definition sorted. return; if (values == null) java.util.Arrays.sort (indices); else { // Just BubbleSort; this is efficient when already mostly sorted. // Note that we BubbleSort from the the end forward; this is most efficient // when we have added a few additional items to the end of a previously sorted list. // We could be much smarter if we remembered the highest index that was already sorted for (int i = indices.length-1; i >= 0; i--) { boolean swapped = false; for (int j = 0; j < i; j++) if (indices[j] > indices[j+1]) { // Swap both indices and values int f; f = indices[j]; indices[j] = indices[j+1]; indices[j+1] = f; if (values != null) { double v; v = values[j]; values[j] = values[j+1]; values[j+1] = v; } swapped = true; } if (!swapped) break; } } //if (values == null) int numDuplicates = 0; for (int i = 1; i < indices.length; i++) if (indices[i-1] == indices[i]) numDuplicates++; if (numDuplicates > 0) removeDuplicates (numDuplicates); } // Argument zero is special value meaning that this function should count them. 
/** Collapses runs of equal indices into a single entry, summing their values.
 *  Assumes the indices array is already sorted (sortIndices calls this after sorting).
 *  @param numDuplicates Number of duplicate index pairs; 0 is a special value
 *                       meaning this method should count them itself. */
protected void removeDuplicates (int numDuplicates)
{
  // Argument 0 means "count the duplicates here".
  if (numDuplicates == 0)
    for (int i = 1; i < indices.length; i++)
      if (indices[i-1] == indices[i])
        numDuplicates++;
  if (numDuplicates == 0)
    return;
  int[] newIndices = new int[indices.length - numDuplicates];
  // values == null means a binary vector; there are no values to merge.
  double[] newValues = values == null ? null : new double[indices.length - numDuplicates];
  newIndices[0] = indices[0];
  if (values != null)
    newValues[0] = values[0];
  // i scans the old arrays, j writes into the compacted ones.
  for (int i = 1, j = 1; i < indices.length; i++) {
    if (indices[i] == indices[i-1]) {
      // Duplicate index: accumulate its value into the entry already written.
      if (newValues != null)
        newValues[j-1] += values[i];
    } else {
      newIndices[j] = indices[i];
      if (values != null)
        newValues[j] = values[i];
      j++;
    }
  }
  this.indices = newIndices;
  this.values = newValues;
}

/// Serialization

private static final long serialVersionUID = 2;
private static final int CURRENT_SERIAL_VERSION = 1;

/** Custom serialized form: version tag, then the two array lengths
 *  (-1 marks an absent array), then the raw index and value entries. */
private void writeObject (ObjectOutputStream out) throws IOException {
  if (this instanceof AugmentableFeatureVector)
    // Be sure to sort/compress our data before we write it
    ((AugmentableFeatureVector)this).sortIndices();
  out.writeInt (CURRENT_SERIAL_VERSION);
  out.writeInt (indices == null ? -1 : indices.length);
  out.writeInt (values == null ? -1 : values.length);
  if (indices != null)
    for (int i = 0; i < indices.length; i++)
      out.writeInt (indices[i]);
  if (values != null)
    for (int i = 0; i < values.length; i++)
      out.writeDouble (values[i]);
}

/** Reads the form written by writeObject, recomputing the hasInfinite
 *  flag from the deserialized values. */
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
  int version = in.readInt ();  // read but currently unused; reserved for format evolution
  int indicesSize = in.readInt();
  int valuesSize = in.readInt();
  this.hasInfinite = false;
  if (indicesSize >= 0) {
    indices = new int[indicesSize];
    for (int i = 0; i < indicesSize; i++) {
      indices[i] = in.readInt();
    }
  }
  if (valuesSize >= 0) {
    values = new double[valuesSize];
    for (int i = 0; i < valuesSize; i++) {
      values[i] = in.readDouble();
      // Track infinities so dot products can route through extendedDotProduct.
      if (Double.isInfinite (values[i]))
        this.hasInfinite = true;
    }
  }
}
}
28,980
27.552709
130
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/FeatureSelection.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */

/** A subset of features, represented as a bit set over the indices of an
    {@link Alphabet}.  Bit i is set iff the feature with alphabet index i
    is selected.
    @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
 */

package cc.mallet.types;

import java.util.BitSet;
import java.util.regex.Pattern;
import java.io.*;

import cc.mallet.types.*;

public class FeatureSelection implements AlphabetCarrying, Serializable
{
  Alphabet dictionary;
  BitSet selectedFeatures;
  // boolean defaultValue; //Implement this by using it to reverse all the exterior interfaces

  /** Wraps an existing bit set.  The bit set is NOT copied, so later
   *  mutations of {@code selectedFeatures} are visible through this object. */
  public FeatureSelection (Alphabet dictionary, BitSet selectedFeatures)
  {
    this.dictionary = dictionary;
    this.selectedFeatures = selectedFeatures;
  }

  /** An initially empty selection over the given alphabet. */
  public FeatureSelection (Alphabet dictionary)
  {
    this.dictionary = dictionary;
    this.selectedFeatures = new BitSet();
  }

  /** Selects the {@code numFeatures} top-ranked features of {@code rsv}
   *  (or all of them, if the alphabet has fewer entries). */
  public FeatureSelection (RankedFeatureVector rsv, int numFeatures)
  {
    this.dictionary = rsv.getAlphabet();
    this.selectedFeatures = new BitSet (dictionary.size());
    int numSelections = Math.min (numFeatures, dictionary.size());
    for (int i = 0; i < numSelections; i++)
      selectedFeatures.set (rsv.getIndexAtRank(i));
  }

  /** Creates a FeatureSelection that includes only those features whose names match a given regex.
   *  A static factory method.
   *  @param dictionary A dictionary of feature names.  Entries must be strings.
   *  @param regex Features whose names match this pattern will be included.
   *  @return A new FeatureSelection.
   */
  public static FeatureSelection createFromRegex (Alphabet dictionary, Pattern regex)
  {
    BitSet included = new BitSet (dictionary.size());
    for (int i = 0; i < dictionary.size(); i++) {
      String feature = (String) dictionary.lookupObject (i);
      if (regex.matcher (feature).matches()) {
        included.set (i);
      }
    }
    return new FeatureSelection (dictionary, included);
  }

  /** Returns a copy sharing this object's alphabet but owning an
   *  independent copy of the bit set. */
  public Object clone ()
  {
    return new FeatureSelection (dictionary, (BitSet)selectedFeatures.clone());
  }

  public Alphabet getAlphabet () { return dictionary; }

  public Alphabet[] getAlphabets () { return new Alphabet[] { dictionary }; }

  /** Number of currently selected features. */
  public int cardinality () { return selectedFeatures.cardinality(); }

  /** The backing bit set itself, not a copy; mutations affect this selection. */
  public BitSet getBitSet () { return selectedFeatures; }

  /** Selects the given feature, adding it to the alphabet if necessary. */
  public void add (Object o) { add (dictionary.lookupIndex(o)); }

  public void add (int index)
  {
    assert (index >= 0);
    selectedFeatures.set (index);
  }

  /** Deselects the given feature.  A no-op if the feature is not in the
   *  alphabet.  (Previously this looked the feature up with growth enabled,
   *  which silently added absent features to the alphabet as a side effect;
   *  the non-growing lookup matches {@link #contains(Object)}.) */
  public void remove (Object o)
  {
    int index = dictionary.lookupIndex (o, false);
    if (index >= 0)
      remove (index);
  }

  public void remove (int index) { selectedFeatures.set (index, false); }

  public boolean contains (Object o)
  {
    int index = dictionary.lookupIndex (o, false);
    if (index == -1) return false;
    return contains (index);
  }

  public boolean contains (int index) { return selectedFeatures.get (index); }

  /** Unions the other selection's bits into this one.  Assumes both
   *  selections are over compatible alphabets — TODO confirm callers
   *  guarantee this; the method itself does not check. */
  public void or (FeatureSelection fs)
  {
    selectedFeatures.or (fs.selectedFeatures);
  }

  /** Index of the first selected feature at or after {@code index}, or -1 if none. */
  public int nextSelectedIndex (int index) { return selectedFeatures.nextSetBit (index); }

  /** Index of the first non-selected feature at or after {@code index}. */
  public int nextDeselectedIndex (int index) { return selectedFeatures.nextClearBit (index); }

  // Serialization

  private static final long serialVersionUID = 1;
  private static final int CURRENT_SERIAL_VERSION = 0;
  static final int NULL_INTEGER = -1;

  private void writeObject (ObjectOutputStream out) throws IOException
  {
    out.writeInt (CURRENT_SERIAL_VERSION);
    out.writeObject (dictionary);
    out.writeObject (selectedFeatures);
  }

  private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
  {
    int version = in.readInt ();  // read but currently unused; reserved for format evolution
    dictionary = (Alphabet) in.readObject ();
    selectedFeatures = (BitSet) in.readObject ();
  }
}
4,122
23.688623
99
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Labelings.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */

package cc.mallet.types;

import cc.mallet.types.Label;

/** A collection of labelings, either for a multi-label problem (all labels
    are part of the same label dictionary), or a factorized labeling, (each
    label is part of a different dictionary).
    @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
 */
public class Labelings implements AlphabetCarrying
{
  Labeling[] labels;

  /** Copies the given array of labelings, first verifying that each
   *  adjacent pair has matching alphabets.
   *  @throws IllegalArgumentException if any two neighbors disagree. */
  public Labelings (Labeling[] labels)
  {
    for (int k = 1; k < labels.length; k++) {
      if (!Alphabet.alphabetsMatch (labels[k-1], labels[k]))
        throw new IllegalArgumentException ("Alphabets do not match");
    }
    // Defensive shallow copy so callers cannot mutate our array.
    this.labels = labels.clone ();
  }

  /** Alphabet of the first labeling (all are checked to match at construction). */
  public Alphabet getAlphabet ()
  {
    return labels[0].getAlphabet ();
  }

  public Alphabet[] getAlphabets ()
  {
    return labels[0].getAlphabets ();
  }

  /** Number of labelings in this collection. */
  int size ()
  {
    return labels.length;
  }

  /** The i-th labeling. */
  Labeling get (int i)
  {
    return labels[i];
  }
}
1,398
30.795455
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Metric.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */

/** Interface for a measure of distance between two <CODE>SparseVector</CODE>s

    @author Jerod Weinman <A HREF="mailto:[email protected]">[email protected]</A>
 */

package cc.mallet.types;

import cc.mallet.types.SparseVector;

public interface Metric {

  /** Returns the distance between vectors {@code a} and {@code b}.
   *  Whether the measure is symmetric or satisfies the triangle
   *  inequality is up to the implementation; this interface does not
   *  guarantee either. */
  public double distance( SparseVector a, SparseVector b);

}
747
31.521739
88
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Dirichlet.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ package cc.mallet.types; import gnu.trove.TIntHashSet; import gnu.trove.TIntIntHashMap; import gnu.trove.TIntIterator; import java.io.BufferedWriter; import java.io.FileWriter; import java.io.IOException; import java.io.PrintWriter; import java.text.NumberFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import cc.mallet.types.Multinomial; import cc.mallet.util.Maths; import cc.mallet.util.Randoms; /** * Various useful functions related to Dirichlet distributions. * * @author Andrew McCallum and David Mimno */ public class Dirichlet { Alphabet dict; double magnitude = 1; double[] partition; Randoms random = null; /** Actually the negative Euler-Mascheroni constant */ public static final double EULER_MASCHERONI = -0.5772156649015328606065121; public static final double PI_SQUARED_OVER_SIX = Math.PI * Math.PI / 6; public static final double HALF_LOG_TWO_PI = Math.log(2 * Math.PI) / 2; public static final double DIGAMMA_COEF_1 = 1/12; public static final double DIGAMMA_COEF_2 = 1/120; public static final double DIGAMMA_COEF_3 = 1/252; public static final double DIGAMMA_COEF_4 = 1/240; public static final double DIGAMMA_COEF_5 = 1/132; public static final double DIGAMMA_COEF_6 = 691/32760; public static final double DIGAMMA_COEF_7 = 1/12; public static final double DIGAMMA_COEF_8 = 3617/8160; public static final double DIGAMMA_COEF_9 = 43867/14364; public static final double DIGAMMA_COEF_10 = 174611/6600; public static final double DIGAMMA_LARGE = 9.5; public static final double DIGAMMA_SMALL = .000001; /** A dirichlet parameterized by 
a distribution and a magnitude * * @param m The magnitude of the Dirichlet: sum_i alpha_i * @param p A probability distribution: p_i = alpha_i / m */ public Dirichlet (double m, double[] p) { magnitude = m; partition = p; } /** A symmetric dirichlet: E(X_i) = E(X_j) for all i, j * * @param m The magnitude of the Dirichlet: sum_i alpha_i * @param n The number of dimensions */ /* public Dirichlet (double m, int n) { magnitude = m; partition = new double[n]; partition[0] = 1.0 / n; for (int i=1; i<n; i++) { partition[i] = partition[0]; } } */ /** A dirichlet parameterized with a single vector of positive reals */ public Dirichlet(double[] p) { magnitude = 0; partition = new double[p.length]; // Add up the total for (int i=0; i<p.length; i++) { magnitude += p[i]; } for (int i=0; i<p.length; i++) { partition[i] = p[i] / magnitude; } } /** Constructor that takes an alphabet representing the * meaning of each dimension */ public Dirichlet (double[] alphas, Alphabet dict) { this(alphas); if (dict != null && alphas.length != dict.size()) throw new IllegalArgumentException ("alphas and dict sizes do not match."); this.dict = dict; if (dict != null) dict.stopGrowth(); } /** * A symmetric Dirichlet with alpha_i = 1.0 and the * number of dimensions of the given alphabet. */ public Dirichlet (Alphabet dict) { this (dict, 1.0); } /** * A symmetric Dirichlet with alpha_i = <code>alpha</code> and the * number of dimensions of the given alphabet. 
*/ public Dirichlet (Alphabet dict, double alpha) { this(dict.size(), alpha); this.dict = dict; dict.stopGrowth(); } /** A symmetric Dirichlet with alpha_i = 1.0 and <code>size</code> dimensions */ public Dirichlet (int size) { this (size, 1.0); } /** A symmetric dirichlet: E(X_i) = E(X_j) for all i, j * * @param n The number of dimensions * @param alpha The parameter for each dimension */ public Dirichlet (int size, double alpha) { magnitude = size * alpha; partition = new double[size]; partition[0] = 1.0 / size; for (int i=1; i<size; i++) { partition[i] = partition[0]; } } private void initRandom() { if (random == null) { random = new Randoms(); } } public double[] nextDistribution() { double distribution[] = new double[partition.length]; initRandom(); // For each dimension, draw a sample from Gamma(mp_i, 1) double sum = 0; for (int i=0; i<distribution.length; i++) { distribution[i] = random.nextGamma(partition[i] * magnitude, 1); if (distribution[i] <= 0) { distribution[i] = 0.0001; } sum += distribution[i]; } // Normalize for (int i=0; i<distribution.length; i++) { distribution[i] /= sum; } return distribution; } /** * Create a printable list of alpha_i parameters */ public static String distributionToString(double magnitude, double[] distribution) { StringBuffer output = new StringBuffer(); NumberFormat formatter = NumberFormat.getInstance(); formatter.setMaximumFractionDigits(5); output.append(formatter.format(magnitude) + ":\t"); for (int i=0; i<distribution.length; i++) { output.append(formatter.format(distribution[i]) + "\t"); } return output.toString(); } /** Write the parameters alpha_i to the specified file, one * per line */ public void toFile(String filename) throws IOException { PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(filename))); for (int i=0; i<partition.length; i++) { out.println(magnitude * partition[i]); } out.flush(); out.close(); } /** Dirichlet-multinomial: draw a distribution from the dirichlet, then draw n 
samples from that multinomial. */ public int[] drawObservation(int n) { initRandom(); double[] distribution = nextDistribution(); return drawObservation(n, distribution); } /** * Draw a count vector from the probability distribution provided. * * @param n The <i>expected</i> total number of counts in the returned vector. The actual number is ~ Poisson(<code>n</code>) */ public int[] drawObservation(int n, double[] distribution) { initRandom(); int[] histogram = new int[partition.length]; Arrays.fill(histogram, 0); int count; // I was using a poisson, but the poisson variate generator // goes berzerk for lambda above ~500. if (n < 100) { count = random.nextPoisson(); } else { // p(N(100, 10) <= 0) = 7.619853e-24 count = (int) Math.round(random.nextGaussian(n, n)); } for (int i=0; i<count; i++) { histogram[random.nextDiscrete(distribution)]++; } return histogram; } /** Create a set of d draws from a dirichlet-multinomial, each * with an average of n observations. */ public Object[] drawObservations(int d, int n) { Object[] observations = new Object[d]; for (int i=0; i<d; i++) { observations[i] = drawObservation(n); } return observations; } /** This calculates a log gamma function exactly. * It's extremely inefficient -- use this for comparison only. */ public static double logGammaDefinition(double z) { double result = EULER_MASCHERONI * z - Math.log(z); for (int k=1; k < 10000000; k++) { result += (z/k) - Math.log(1 + (z/k)); } return result; } /** This directly calculates the difference between two * log gamma functions using a recursive formula. * The break-even with the Stirling approximation is about * n=2, so it's not necessarily worth using this. 
*/ public static double logGammaDifference(double z, int n) { double result = 0.0; for (int i=0; i < n; i++) { result += Math.log(z + i); } return result; } /** Currently aliased to <code>logGammaStirling</code> */ public static double logGamma(double z) { return logGammaStirling(z); } /** Use a fifth order Stirling's approximation. * * @param z Note that Stirling's approximation is increasingly unstable as <code>z</code> approaches 0. If <code>z</code> is less than 2, we shift it up, calculate the approximation, and then shift the answer back down. */ public static double logGammaStirling(double z) { int shift = 0; while (z < 2) { z++; shift++; } double result = HALF_LOG_TWO_PI + (z - 0.5) * Math.log(z) - z + 1/(12 * z) - 1 / (360 * z * z * z) + 1 / (1260 * z * z * z * z * z); while (shift > 0) { shift--; z--; result -= Math.log(z); } return result; } /** Gergo Nemes' approximation */ public static double logGammaNemes(double z) { double result = HALF_LOG_TWO_PI - (Math.log(z) / 2) + z * (Math.log(z + (1/(12 * z - (1/(10*z))))) - 1); return result; } /** Calculate digamma using an asymptotic expansion involving Bernoulli numbers. 
*/ public static double digamma(double z) { // This is based on matlab code by Tom Minka // if (z < 0) { System.out.println(" less than zero"); } double psi = 0; if (z < DIGAMMA_SMALL) { psi = EULER_MASCHERONI - (1 / z); // + (PI_SQUARED_OVER_SIX * z); /*for (int n=1; n<100000; n++) { psi += z / (n * (n + z)); }*/ return psi; } while (z < DIGAMMA_LARGE) { psi -= 1 / z; z++; } double invZ = 1/z; double invZSquared = invZ * invZ; psi += Math.log(z) - .5 * invZ - invZSquared * (DIGAMMA_COEF_1 - invZSquared * (DIGAMMA_COEF_2 - invZSquared * (DIGAMMA_COEF_3 - invZSquared * (DIGAMMA_COEF_4 - invZSquared * (DIGAMMA_COEF_5 - invZSquared * (DIGAMMA_COEF_6 - invZSquared * DIGAMMA_COEF_7)))))); return psi; } public static double digammaDifference(double x, int n) { double sum = 0; for (int i=0; i<n; i++) { sum += 1 / (x + i); } return sum; } public static double trigamma(double z) { int shift = 0; while (z < 2) { z++; shift++; } double oneOverZ = 1.0 / z; double oneOverZSquared = oneOverZ * oneOverZ; double result = oneOverZ + 0.5 * oneOverZSquared + 0.1666667 * oneOverZSquared * oneOverZ - 0.03333333 * oneOverZSquared * oneOverZSquared * oneOverZ + 0.02380952 * oneOverZSquared * oneOverZSquared * oneOverZSquared * oneOverZ - 0.03333333 * oneOverZSquared * oneOverZSquared * oneOverZSquared * oneOverZSquared * oneOverZ; System.out.println(z + " -> " + result); while (shift > 0) { shift--; z--; result += 1.0 / (z * z); System.out.println(z + " -> " + result); } return result; } /** * Learn the concentration parameter of a symmetric Dirichlet using frequency histograms. * Since all parameters are the same, we only need to keep track of * the number of observation/dimension pairs with count N * * @param countHistogram An array of frequencies. If the matrix X represents observations such that x<sub>dt</sub> is how many times word t occurs in document d, <code>countHistogram[3]</code> is the total number of cells <i>in any column</i> that equal 3. 
* @param observationLengths A histogram of sample lengths, for example <code>observationLengths[20]</code> could be the number of documents that are exactly 20 tokens long. * @param numDimensions The total number of dimensions. * @param currentValue An initial starting value. */ public static double learnSymmetricConcentration(int[] countHistogram, int[] observationLengths, int numDimensions, double currentValue) { double currentDigamma; // The histogram arrays are presumably allocated before // we knew what went in them. It is therefore likely that // the largest non-zero value may be much closer to the // beginning than the end. We don't want to iterate over // a whole bunch of zeros, so keep track of the last value. int largestNonZeroCount = 0; int[] nonZeroLengthIndex = new int[ observationLengths.length ]; for (int index = 0; index < countHistogram.length; index++) { if (countHistogram[index] > 0) { largestNonZeroCount = index; } } int denseIndex = 0; for (int index = 0; index < observationLengths.length; index++) { if (observationLengths[index] > 0) { nonZeroLengthIndex[denseIndex] = index; denseIndex++; } } int denseIndexSize = denseIndex; for (int iteration = 1; iteration <= 200; iteration++) { double currentParameter = currentValue / numDimensions; // Calculate the numerator currentDigamma = 0; double numerator = 0; // Counts of 0 don't matter, so start with 1 for (int index = 1; index <= largestNonZeroCount; index++) { currentDigamma += 1.0 / (currentParameter + index - 1); numerator += countHistogram[index] * currentDigamma; } // Now calculate the denominator, a sum over all observation lengths currentDigamma = 0; double denominator = 0; int previousLength = 0; double cachedDigamma = digamma(currentValue); for (denseIndex = 0; denseIndex < denseIndexSize; denseIndex++) { int length = nonZeroLengthIndex[denseIndex]; if (length - previousLength > 20) { // If the next length is sufficiently far from the previous, // it's faster to recalculate from scratch. 
currentDigamma = digamma(currentValue + length) - cachedDigamma; } else { // Otherwise iterate up. This looks slightly different // from the previous version (no -1) because we're indexing differently. for (int index = previousLength; index < length; index++) { currentDigamma += 1.0 / (currentValue + index); } } denominator += currentDigamma * observationLengths[length]; } currentValue = currentParameter * numerator / denominator; ///System.out.println(currentValue + " = " + currentParameter + " * " + numerator + " / " + denominator); } return currentValue; } public static void testSymmetricConcentration(int numDimensions, int numObservations, int observationMeanLength) { double logD = Math.log(numDimensions); for (int exponent = -5; exponent < 4; exponent++) { double alpha = numDimensions * 1.0; Dirichlet prior = new Dirichlet(numDimensions, alpha / numDimensions); int[] countHistogram = new int[ 1000000 ]; int[] observationLengths = new int[ 1000000 ]; Object[] observations = prior.drawObservations(numObservations, observationMeanLength); Dirichlet optimizedDirichlet = new Dirichlet(numDimensions, 1.0); optimizedDirichlet.learnParametersWithHistogram(observations); System.out.println(optimizedDirichlet.magnitude); for (int i=0; i < numObservations; i++) { int[] observation = (int[]) observations[i]; int total = 0; for (int k=0; k < numDimensions; k++) { if (observation[k] > 0) { total += observation[k]; countHistogram[ observation[k] ]++; } } observationLengths[ total ]++; } double estimatedAlpha = learnSymmetricConcentration(countHistogram, observationLengths, numDimensions, 1.0); System.out.println(alpha + "\t" + estimatedAlpha + "\t" + Math.abs(alpha - estimatedAlpha)); } } /** * Learn Dirichlet parameters using frequency histograms * * @param parameters A reference to the current values of the parameters, which will be updated in place * @param observations An array of count histograms. 
<code>observations[10][3]</code> could be the number of documents that contain exactly 3 tokens of word type 10. * @param observationLengths A histogram of sample lengths, for example <code>observationLengths[20]</code> could be the number of documents that are exactly 20 tokens long. * @returns The sum of the learned parameters. */ public static double learnParameters(double[] parameters, int[][] observations, int[] observationLengths) { return learnParameters(parameters, observations, observationLengths, 1.00001, 1.0, 200); } /** * Learn Dirichlet parameters using frequency histograms * * @param parameters A reference to the current values of the parameters, which will be updated in place * @param observations An array of count histograms. <code>observations[10][3]</code> could be the number of documents that contain exactly 3 tokens of word type 10. * @param observationLengths A histogram of sample lengths, for example <code>observationLengths[20]</code> could be the number of documents that are exactly 20 tokens long. * @param shape Gamma prior E(X) = shape * scale, var(X) = shape * scale<sup>2</sup> * @param scale * @param numIterations 200 to 1000 generally insures convergence, but 1-5 is often enough to step in the right direction * @returns The sum of the learned parameters. */ public static double learnParameters(double[] parameters, int[][] observations, int[] observationLengths, double shape, double scale, int numIterations) { int i, k; double parametersSum = 0; // Initialize the parameter sum for (k=0; k < parameters.length; k++) { parametersSum += parameters[k]; } double oldParametersK; double currentDigamma; double denominator; int nonZeroLimit; int[] nonZeroLimits = new int[observations.length]; Arrays.fill(nonZeroLimits, -1); // The histogram arrays go up to the size of the largest document, // but the non-zero values will almost always cluster in the low end. 
// We avoid looping over empty arrays by saving the index of the largest // non-zero value. int[] histogram; for (i=0; i<observations.length; i++) { histogram = observations[i]; //StringBuffer out = new StringBuffer(); for (k = 0; k < histogram.length; k++) { if (histogram[k] > 0) { nonZeroLimits[i] = k; //out.append(k + ":" + histogram[k] + " "); } } //System.out.println(out); } for (int iteration=0; iteration<numIterations; iteration++) { // Calculate the denominator denominator = 0; currentDigamma = 0; // Iterate over the histogram: for (i=1; i<observationLengths.length; i++) { currentDigamma += 1 / (parametersSum + i - 1); denominator += observationLengths[i] * currentDigamma; } // Bayesian estimation Part I denominator -= 1/scale; // Calculate the individual parameters parametersSum = 0; for (k=0; k<parameters.length; k++) { // What's the largest non-zero element in the histogram? nonZeroLimit = nonZeroLimits[k]; oldParametersK = parameters[k]; parameters[k] = 0; currentDigamma = 0; histogram = observations[k]; for (i=1; i <= nonZeroLimit; i++) { currentDigamma += 1 / (oldParametersK + i - 1); parameters[k] += histogram[i] * currentDigamma; } // Bayesian estimation part II parameters[k] = (oldParametersK * parameters[k] + shape) / denominator; parametersSum += parameters[k]; } } return parametersSum; } /** Use the fixed point iteration described by Tom Minka. */ public long learnParametersWithHistogram(Object[] observations) { int maxLength = 0; int[] maxBinCounts = new int[partition.length]; Arrays.fill(maxBinCounts, 0); for (int i=0; i < observations.length; i++) { int length = 0; int[] observation = (int[]) observations[i]; for (int bin=0; bin < observation.length; bin++) { if (observation[bin] > maxBinCounts[bin]) { maxBinCounts[bin] = observation[bin]; } length += observation[bin]; } if (length > maxLength) { maxLength = length; } } // Arrays start at zero, so I'm sacrificing one int for greater clarity // later on... 
int[][] binCountHistograms = new int[partition.length][]; for (int bin=0; bin < partition.length; bin++) { binCountHistograms[bin] = new int[ maxBinCounts[bin] + 1 ]; Arrays.fill(binCountHistograms[bin], 0); } // System.out.println("got mem: " + (System.currentTimeMillis() - start)); int[] lengthHistogram = new int[maxLength + 1]; Arrays.fill(lengthHistogram, 0); // System.out.println("got lengths: " + (System.currentTimeMillis() - start)); for (int i=0; i < observations.length; i++) { int length = 0; int[] observation = (int[]) observations[i]; for (int bin=0; bin < observation.length; bin++) { binCountHistograms[bin][ observation[bin] ]++; length += observation[bin]; } lengthHistogram[length]++; } return learnParametersWithHistogram(binCountHistograms, lengthHistogram); } public long learnParametersWithHistogram(int[][] binCountHistograms, int[] lengthHistogram) { long start = System.currentTimeMillis(); double[] newParameters = new double[partition.length]; double alphaK; double currentDigamma; double denominator; double parametersSum = 0.0; int i, k; for (k = 0; k < partition.length; k++) { newParameters[k] = magnitude * partition[k]; parametersSum += newParameters[k]; } for (int iteration=0; iteration<1000; iteration++) { // Calculate the denominator denominator = 0; currentDigamma = 0; for (i=1; i < lengthHistogram.length; i++) { currentDigamma += 1 / (parametersSum + i - 1); denominator += lengthHistogram[i] * currentDigamma; } assert(denominator > 0.0); assert(! Double.isNaN(denominator)); parametersSum = 0.0; // Calculate the individual parameters for (k=0; k<partition.length; k++) { alphaK = newParameters[k]; newParameters[k] = 0.0; currentDigamma = 0; int[] histogram = binCountHistograms[k]; if (histogram.length <= 1) { // Since histogram[0] is for 0... newParameters[k] = 0.000001; } else { for (i=1; i<histogram.length; i++) { currentDigamma += 1 / (alphaK + i - 1); newParameters[k] += histogram[i] * currentDigamma; } } if (! 
(newParameters[k] > 0.0)) { System.out.println("length of empty array: " + (new int[0]).length); for (i=0; i<histogram.length; i++) { System.out.print(histogram[i] + " "); } System.out.println(); } assert(newParameters[k] > 0.0); assert(! Double.isNaN(newParameters[k])); newParameters[k] *= alphaK / denominator; parametersSum += newParameters[k]; } /* try { if (iteration % 25 == 0) { //System.out.println(distributionToString(parametersSum, newParameters)); //toFile("../newsgroups/direct/iteration" + iteration); //System.out.println(iteration + ": " + (System.currentTimeMillis() - start)); } } catch (Exception e) { System.out.println(e); } */ } for (k = 0; k < partition.length; k++) { partition[k] = newParameters[k] / parametersSum; magnitude = parametersSum; } // System.out.println(distributionToString(magnitude, partition)); return System.currentTimeMillis() - start; } /** Use the fixed point iteration described by Tom Minka. */ public long learnParametersWithDigamma(Object[] observations) { int[][] binCounts = new int[partition.length][observations.length]; // System.out.println("got mem: " + (System.currentTimeMillis() - start)); int[] observationLengths = new int[observations.length]; // System.out.println("got lengths: " + (System.currentTimeMillis() - start)); for (int i=0; i < observations.length; i++) { int[] observation = (int[]) observations[i]; for (int bin=0; bin < partition.length; bin++) { binCounts[bin][i] = observation[bin]; observationLengths[i] += observation[bin]; } } // System.out.println("init: " + (System.currentTimeMillis() - start)); return learnParametersWithDigamma(binCounts, observationLengths); } public long learnParametersWithDigamma(int[][] binCounts, int[] observationLengths) { long start = System.currentTimeMillis(); double[] newParameters = new double[partition.length]; double alphaK; double denominator; double newMagnitude; int i, k; for (int iteration=0; iteration<1000; iteration++) { newMagnitude = 0; // Calculate the denominator 
denominator = 0; for (i=0; i<observationLengths.length; i++) { denominator += digamma(magnitude + observationLengths[i]); } denominator -= observationLengths.length * digamma(magnitude); // Calculate the individual parameters for (k=0; k<partition.length; k++) { newParameters[k] = 0; int[] counts = binCounts[k]; alphaK = magnitude * partition[k]; double digammaAlphaK = digamma(alphaK); for (i=0; i<counts.length; i++) { if (counts[i] == 0) { newParameters[k] += digammaAlphaK; } else { newParameters[k] += digamma(alphaK + counts[i]); } } newParameters[k] -= counts.length * digammaAlphaK; if (newParameters[k] <= 0) { newParameters[k] = 0.000001; } else { newParameters[k] *= alphaK / denominator; } if (newParameters[k] <= 0) { System.out.println(newParameters[k] + "\t" + alphaK + "\t" + denominator); } assert(newParameters[k] > 0); assert(! Double.isNaN(newParameters[k])); newMagnitude += newParameters[k]; // System.out.println("finished dimension " + k); } magnitude = newMagnitude; for (k=0; k<partition.length; k++) { partition[k] = newParameters[k] / magnitude; /* if (k < 20) { System.out.println(partition[k]+" = "+newParameters[k]+" / "+magnitude); } */ } /* try { if (iteration % 25 == 0) { toFile("../newsgroups/digamma/iteration" + iteration); //System.out.println(iteration + ": " + (System.currentTimeMillis() - start)); } } catch (Exception e) { System.out.println(e); } */ } // System.out.println(distributionToString(magnitude, partition)); return System.currentTimeMillis() - start; } /** Estimate a dirichlet with the moment matching method * described by Ronning. 
*/ public long learnParametersWithMoments(Object[] observations) { long start = System.currentTimeMillis(); int i, bin; int[] observationLengths = new int[observations.length]; double[] variances = new double[partition.length]; Arrays.fill(partition, 0.0); Arrays.fill(observationLengths, 0); Arrays.fill(variances, 0.0); // Find E[p_k]'s for (i=0; i < observations.length; i++) { int[] observation = (int[]) observations[i]; // Find the sum of counts in each bin for (bin=0; bin < partition.length; bin++) { observationLengths[i] += observation[bin]; } for (bin=0; bin < partition.length; bin++) { partition[bin] += (double) observation[bin] / observationLengths[i]; } } for (bin=0; bin < partition.length; bin++) { partition[bin] /= observations.length; } // Find var[p_k]'s double difference; for (i=0; i < observations.length; i++) { int[] observation = (int[]) observations[i]; for (bin=0; bin < partition.length; bin++) { difference = ((double) observation[bin] / observationLengths[i]) - partition[bin]; variances[bin] += difference * difference; // avoiding Math.pow... 
} } for (bin=0; bin < partition.length; bin++) { variances[bin] /= observations.length - 1; } // Now calculate the magnitude: // log \sum_k \alpha_k = 1/(K-1) \sum_k log[ ( E[p_k](1 - E[p_k]) / var[p_k] ) - 1 ] double sum = 0.0; for (bin=0; bin < partition.length; bin++) { if (partition[bin] == 0) { continue; } sum += Math.log(( partition[bin] * ( 1 - partition[bin] ) / variances[bin] ) - 1); } magnitude = Math.exp(sum / (partition.length - 1)); //System.out.println(distributionToString(magnitude, partition)); return System.currentTimeMillis() - start; } public long learnParametersWithLeaveOneOut(Object[] observations) { int[][] binCounts = new int[partition.length][observations.length]; // System.out.println("got mem: " + (System.currentTimeMillis() - start)); int[] observationLengths = new int[observations.length]; // System.out.println("got lengths: " + (System.currentTimeMillis() - start)); for (int i=0; i < observations.length; i++) { int[] observation = (int[]) observations[i]; for (int bin=0; bin < partition.length; bin++) { binCounts[bin][i] = observation[bin]; observationLengths[i] += observation[bin]; } } // System.out.println("init: " + (System.currentTimeMillis() - start)); return learnParametersWithLeaveOneOut(binCounts, observationLengths); } /** Learn parameters using Minka's Leave-One-Out (LOO) likelihood */ public long learnParametersWithLeaveOneOut(int[][] binCounts, int[] observationLengths) { long start = System.currentTimeMillis(); int i, bin; double[] newParameters = new double[partition.length]; double[] binSums = new double[partition.length]; double observationSum = 0.0; double parameterSum = 0.0; int[] counts; // Uniform initialization // Arrays.fill(partition, 1.0 / partition.length); for (int iteration = 0; iteration < 1000; iteration++) { observationSum = 0.0; Arrays.fill(binSums, 0.0); for (i=0; i < observationLengths.length; i++) { observationSum += (observationLengths[i] / (observationLengths[i] - 1 + magnitude)); } for (bin=0; bin < 
partition.length; bin++) { counts = binCounts[bin]; for (i=0; i<counts.length; i++) { if (counts[i] >= 2) { binSums[bin] += (counts[i] / (counts[i] - 1 + (magnitude * partition[bin]))); } } } parameterSum = 0.0; for (bin=0; bin < partition.length; bin++) { if (binSums[bin] == 0.0) { newParameters[bin] = 0.000001; } else { newParameters[bin] = (partition[bin] * magnitude * binSums[bin] / observationSum); } parameterSum += newParameters[bin]; } for (bin=0; bin < partition.length; bin++) { partition[bin] = newParameters[bin] / parameterSum; } magnitude = parameterSum; /* if (iteration % 50 == 0) { System.out.println(iteration + ": " + magnitude); } */ } // System.out.println(distributionToString(magnitude, partition)); return System.currentTimeMillis() - start; } /** Compute the L1 residual between two dirichlets */ public double absoluteDifference(Dirichlet other) { if (partition.length != other.partition.length) { throw new IllegalArgumentException("dirichlets must have the same dimension to be compared"); } double residual = 0.0; for (int k=0; k<partition.length; k++) { residual += Math.abs((partition[k] * magnitude) - (other.partition[k] * other.magnitude)); } return residual; } /** Compute the L2 residual between two dirichlets */ public double squaredDifference(Dirichlet other) { if (partition.length != other.partition.length) { throw new IllegalArgumentException("dirichlets must have the same dimension to be compared"); } double residual = 0.0; for (int k=0; k<partition.length; k++) { residual += Math.pow((partition[k] * magnitude) - (other.partition[k] * other.magnitude), 2); } return residual; } public void checkBreakeven(double x) { long start, clock1, clock2; double digammaX = digamma(x); for (int n=1; n < 100; n++) { start = System.currentTimeMillis(); for (int i=0; i<1000000; i++) { digamma(x + n); } clock1 = System.currentTimeMillis() - start; start = System.currentTimeMillis(); for (int i=0; i<1000000; i++) { digammaDifference(x, n); } clock2 = 
System.currentTimeMillis() - start; System.out.println(n + "\tdirect: " + clock1 + "\tindirect: " + clock2 + " (" + (clock1 - clock2) + ")"); System.out.println(" " + (digamma(x + n) - digammaX) + " " + digammaDifference(x, n)); } } public static String compare(double sum, int k, int n, int w) { Dirichlet uniformDirichlet, dirichlet; StringBuffer output = new StringBuffer(); output.append(sum + "\t" + k + "\t" + n + "\t" + w + "\t"); uniformDirichlet = new Dirichlet(k, sum/k); dirichlet = new Dirichlet(sum, uniformDirichlet.nextDistribution()); // System.out.println("real: " + distributionToString(dirichlet.magnitude, // dirichlet.partition)); Object[] observations = dirichlet.drawObservations(n, w); // System.out.println("Done drawing..."); long time; Dirichlet estimatedDirichlet = new Dirichlet(k, sum/k); time = estimatedDirichlet.learnParametersWithDigamma(observations); output.append(time + "\t" + dirichlet.absoluteDifference(estimatedDirichlet) + "\t"); estimatedDirichlet = new Dirichlet(k, sum/k); time = estimatedDirichlet.learnParametersWithHistogram(observations); output.append(time + "\t" + dirichlet.absoluteDifference(estimatedDirichlet) + "\t"); estimatedDirichlet = new Dirichlet(k, sum/k); time = estimatedDirichlet.learnParametersWithMoments(observations); output.append(time + "\t" + dirichlet.absoluteDifference(estimatedDirichlet) + "\t"); // System.out.println("Moments: " + time + ", " + // dirichlet.absoluteDifference(estimatedDirichlet)); estimatedDirichlet = new Dirichlet(k, sum/k); time = estimatedDirichlet.learnParametersWithLeaveOneOut(observations); output.append(time + "\t" + dirichlet.absoluteDifference(estimatedDirichlet) + "\t"); // System.out.println("Leave One Out: " + time + ", " + // dirichlet.absoluteDifference(estimatedDirichlet)); return output.toString(); } /** What is the probability that these two observations were drawn from * the same multinomial with symmetric Dirichlet prior alpha, relative * to the probability that they were 
drawn from different multinomials * both drawn from this Dirichlet? */ public static double dirichletMultinomialLikelihoodRatio(TIntIntHashMap countsX, TIntIntHashMap countsY, double alpha, double alphaSum) { // The likelihood for one DCM is // Gamma( alpha_sum ) prod Gamma( alpha + N_i ) // prod Gamma ( alpha ) Gamma ( alpha_sum + N ) // When we divide this by the product of two other DCMs with the same // alpha parameter, the first term in the numerator cancels with the // first term in the denominator. Then moving the remaining alpha-only // term to the numerator, we get // prod Gamma(alpha) prod Gamma( alpha + X_i + Y_i ) // Gamma (alpha_sum) Gamma( alpha_sum + X_sum + Y_sum ) // ---------------------------------------------------------- // prod Gamma(alpha + X_i) prod Gamma(alpha + Y_i) // Gamma( alpha_sum + X_sum ) Gamma( alpha_sum + Y_sum ) double logLikelihood = 0.0; double logGammaAlpha = logGamma(alpha); int totalX = 0; int totalY = 0; int key, x, y; TIntHashSet distinctKeys = new TIntHashSet(); distinctKeys.addAll(countsX.keys()); distinctKeys.addAll(countsY.keys()); TIntIterator iterator = distinctKeys.iterator(); while (iterator.hasNext()) { key = iterator.next(); x = 0; if (countsX.containsKey(key)) { x = countsX.get(key); } y = 0; if (countsY.containsKey(key)) { y = countsY.get(key); } totalX += x; totalY += y; logLikelihood += logGamma(alpha) + logGamma(alpha + x + y) - logGamma(alpha + x) - logGamma(alpha + y); } logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY) - logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY); return logLikelihood; } /** What is the probability that these two observations were drawn from * the same multinomial with symmetric Dirichlet prior alpha, relative * to the probability that they were drawn from different multinomials * both drawn from this Dirichlet? 
*/ public static double dirichletMultinomialLikelihoodRatio(int[] countsX, int[] countsY, double alpha, double alphaSum) { // This is exactly the same as the method that takes // Trove hashmaps, but with fixed size arrays. if (countsX.length != countsY.length) { throw new IllegalArgumentException("both arrays must contain the same number of dimensions"); } double logLikelihood = 0.0; double logGammaAlpha = logGamma(alpha); int totalX = 0; int totalY = 0; int x, y; for (int key=0; key < countsX.length; key++) { x = countsX[key]; y = countsY[key]; totalX += x; totalY += y; logLikelihood += logGammaAlpha + logGamma(alpha + x + y) - logGamma(alpha + x) - logGamma(alpha + y); } logLikelihood += logGamma(alphaSum + totalX) + logGamma(alphaSum + totalY) - logGamma(alphaSum) - logGamma(alphaSum + totalX + totalY); return logLikelihood; } /** This version uses a non-symmetric Dirichlet prior */ public double dirichletMultinomialLikelihoodRatio(int[] countsX, int[] countsY) { if (countsX.length != countsY.length || countsX.length != partition.length) { throw new IllegalArgumentException("both arrays and the Dirichlet prior must contain the same number of dimensions"); } double logLikelihood = 0.0; double alpha; int totalX = 0; int totalY = 0; int x, y; for (int key=0; key < countsX.length; key++) { x = countsX[key]; y = countsY[key]; totalX += x; totalY += y; alpha = partition[key] * magnitude; logLikelihood += logGamma(alpha) + logGamma(alpha + x + y) - logGamma(alpha + x) - logGamma(alpha + y); } logLikelihood += logGamma(magnitude + totalX) + logGamma(magnitude + totalY) - logGamma(magnitude) - logGamma(magnitude + totalX + totalY); return logLikelihood; } /** Similar to the Dirichlet-multinomial test,s this is a likelihood ratio based * on the Ewens Sampling Formula, which can be considered the distribution of * partitions of integers generated by the Chinese restaurant process. 
*/ public static double ewensLikelihoodRatio(int[] countsX, int[] countsY, double lambda) { if (countsX.length != countsY.length) { throw new IllegalArgumentException("both arrays must contain the same number of dimensions"); } double logLikelihood = 0.0; double alpha; int totalX = 0; int totalY = 0; int total = 0; int x, y; // First count up the totals for (int key=0; key < countsX.length; key++) { x = countsX[key]; y = countsY[key]; totalX += x; totalY += y; total += x + y; } // Now allocate some arrays for the sufficient statisitics // (the number of classes that contain x elements) int[] countHistogramX = new int[total + 1]; int[] countHistogramY = new int[total + 1]; int[] countHistogramBoth = new int[total + 1]; for (int key=0; key < countsX.length; key++) { x = countsX[key]; y = countsY[key]; countHistogramX[ x ]++; countHistogramX[ y ]++; countHistogramBoth[ x + y ]++; } for (int j=1; j <= total; j++) { if (countHistogramX[ j ] == 0 && countHistogramY[ j ] == 0 && countHistogramBoth[ j ] == 0) { continue; } logLikelihood += (countHistogramBoth[ j ] - countHistogramX[ j ] - countHistogramY[ j ]) * Math.log( lambda / j ); logLikelihood += logGamma(countHistogramX[ j ] + 1) + logGamma(countHistogramY[ j ] + 1) - logGamma(countHistogramBoth[ j ] + 1); } logLikelihood += logGamma(total + 1) - logGamma(totalX + 1) - logGamma(totalY + 1); logLikelihood += logGamma(lambda + totalX) + logGamma(lambda + totalY) - logGamma(lambda) - logGamma(lambda + totalX + totalY); return logLikelihood; } public static void runComparison() { double precision; int dimensions; int documents; int meanSize; try { PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter("comparison"))); dimensions = 10; for (int j=0; j<5; j++) { documents = 100; for (int k=0; k<5; k++) { meanSize = 100; for (int l=0; l<5; l++) { System.out.println(dimensions + "\t" + dimensions + "\t" + documents + "\t" + meanSize); // Finally, run this ten times. 
for (int m=0; m<10; m++) { // always use Dir(1, 1, 1, ... 1) for now... out.println(compare(dimensions, dimensions, documents, meanSize)); } out.flush(); meanSize *= 2; } documents *= 2; } dimensions *= 2; } out.flush(); out.close(); } catch (Exception e) { e.printStackTrace(System.out); } } public static void main (String[] args) { testSymmetricConcentration(1000, 100, 1000); /* Dirichlet prior = new Dirichlet(100, 1.0); double[] distribution; int[] x, y; for (int i=0; i<50; i++) { Dirichlet nonSymmetric = new Dirichlet(100, prior.nextDistribution()); // Two observations from same multinomial distribution = nonSymmetric.nextDistribution(); x = nonSymmetric.drawObservation(100, distribution); y = nonSymmetric.drawObservation(100, distribution); System.out.print(nonSymmetric.dirichletMultinomialLikelihoodRatio(x, y) + "\t"); System.out.print(ewensLikelihoodRatio(x, y, 1) + "\t"); // Two observations from different multinomials x = nonSymmetric.drawObservation(100); y = nonSymmetric.drawObservation(100); System.out.print(ewensLikelihoodRatio(x, y, 0.1) + "\t"); System.out.println(nonSymmetric.dirichletMultinomialLikelihoodRatio(x, y)); } */ } public Alphabet getAlphabet () { return dict; } public int size () { return partition.length; } public double alpha (int featureIndex) { return magnitude * partition[featureIndex]; } public void print () { System.out.println ("Dirichlet:"); for (int j = 0; j < partition.length; j++) System.out.println (dict!= null ? 
dict.lookupObject(j).toString() : j + "=" + magnitude * partition[j]); } protected double[] randomRawMultinomial (Randoms r) { double sum = 0; double[] pr = new double[this.partition.length]; for (int i = 0; i < this.partition.length; i++) { // if (alphas[i] < 0) // for (int j = 0; j < alphas.length; j++) // System.out.println (dict.lookupSymbol(j).toString() + "=" + alphas[j]); pr[i] = r.nextGamma(magnitude * partition[i]); sum += pr[i]; } for (int i = 0; i < this.partition.length; i++) pr[i] /= sum; return pr; } public Multinomial randomMultinomial (Randoms r) { return new Multinomial (randomRawMultinomial(r), dict, partition.length, false, false); } public Dirichlet randomDirichlet (Randoms r, double averageAlpha) { double[] pr = randomRawMultinomial (r); double alphaSum = pr.length*averageAlpha; //System.out.println ("randomDirichlet alphaSum = "+alphaSum); for (int i = 0; i < pr.length; i++) pr[i] *= alphaSum; return new Dirichlet (pr, dict); } public FeatureSequence randomFeatureSequence (Randoms r, int length) { Multinomial m = randomMultinomial (r); return m.randomFeatureSequence (r, length); } public FeatureVector randomFeatureVector (Randoms r, int size) { return new FeatureVector (this.randomFeatureSequence (r, size)); } public TokenSequence randomTokenSequence (Randoms r, int length) { FeatureSequence fs = randomFeatureSequence (r, length); TokenSequence ts = new TokenSequence (length); for (int i = 0; i < length; i++) ts.add (fs.getObjectAtPosition(i).toString()); return ts; } public double[] randomVector (Randoms r) { return randomRawMultinomial (r); } public static abstract class Estimator { ArrayList<Multinomial> multinomials; public Estimator () { this.multinomials = new ArrayList<Multinomial>(); } public Estimator (Collection<Multinomial> multinomialsTraining) { this.multinomials = new ArrayList<Multinomial>(multinomialsTraining); for (int i = 1; i < multinomials.size(); i++) if (((Multinomial)multinomials.get(i-1)).size() != 
((Multinomial)multinomials.get(i)).size() || ((Multinomial)multinomials.get(i-1)).getAlphabet() != ((Multinomial)multinomials.get(i)).getAlphabet()) throw new IllegalArgumentException ("All multinomials must have same size and Alphabet."); } public void addMultinomial (Multinomial m) { // xxx Assert that it is the right class and size multinomials.add (m); } public abstract Dirichlet estimate (); } public static class MethodOfMomentsEstimator extends Estimator { public Dirichlet estimate () { int dims = multinomials.get(0).size(); double[] alphas = new double[dims]; for (int i = 1; i < multinomials.size(); i++) multinomials.get(i).addProbabilitiesTo(alphas); double alphaSum = 0; for (int i = 0; i < alphas.length; i++) alphaSum += alphas[i]; for (int i = 0; i < alphas.length; i++) alphas[i] /= alphaSum; // xxx Fix this to set sum by variance matching throw new UnsupportedOperationException ("Not yet implemented."); //return new Dirichlet(alphas); } } }
44,456
26.699065
257
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/LabelVector.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */

package cc.mallet.types;

import cc.mallet.types.Label;
import cc.mallet.types.RankedFeatureVector;

/**
 * A distribution (or arbitrary real-valued scores) over the entries of a
 * {@link LabelAlphabet}, with ranked access inherited from
 * {@link RankedFeatureVector}.  Implements the {@link Labeling} interface so
 * classifiers can return one of these as their output.
 */
public class LabelVector extends RankedFeatureVector implements Labeling
{
	/** Build from explicit (index, value) pairs over the given label alphabet. */
	public LabelVector (LabelAlphabet dict, int[] features, double[] values)
	{
		super (dict, features, values);
	}

	/** Extract the alphabet indices of the given labels, preserving order. */
	private static int[] indicesForLabels (Label[] labels)
	{
		int[] indices = new int[labels.length];
		for (int i = 0; i < labels.length; i++)
			indices[i] = labels[i].getIndex();
		return indices;
	}

	/** Build from Label objects and parallel values; all labels are assumed to
	    share the dictionary of labels[0]. */
	public LabelVector (Label[] labels, double[] values)
	{
		super (labels[0].dictionary, indicesForLabels(labels), values);
	}

	/** Dense constructor: values[i] is the score of label index i. */
	public LabelVector (LabelAlphabet dict, double[] values)
	{
		super (dict, values);
	}

	/** The Label stored at sparse location {@code loc} (not alphabet index). */
	public final Label labelAtLocation (int loc)
	{
		return ((LabelAlphabet)dictionary).lookupLabel(indexAtLocation (loc));
	}

	public LabelAlphabet getLabelAlphabet ()
	{
		return (LabelAlphabet) dictionary;
	}

	// Labeling interface

	// xxx Change these names to better match RankedFeatureVector?

	/** Alphabet index of the highest-valued label. */
	public int getBestIndex ()
	{
		if (rankOrder == null)
			setRankOrder ();
		return rankOrder[0];
	}

	/** The highest-valued label itself. */
	public Label getBestLabel ()
	{
		return ((LabelAlphabet)dictionary).lookupLabel (getBestIndex());
	}

	/** The value of the highest-valued label. */
	public double getBestValue ()
	{
		if (rankOrder == null)
			setRankOrder ();
		return values[rankOrder[0]];
	}

	/** Value of the given label.
	    NOTE(review): looks up by label.toString() via location(); presumably
	    this maps the string entry back to its storage location — confirm that
	    a label absent from this vector cannot reach here (location() returning
	    -1 would throw ArrayIndexOutOfBoundsException). */
	public double value (Label label)
	{
		assert (label.dictionary == this.dictionary);
		return values[this.location (label.toString ())];
	}

	/** Rank (0 = best) of the given label, found by linear search over the
	    rank order. */
	public int getRank (Label label)
	{
		//throw new UnsupportedOperationException ();
		// CPAL - Implemented this
		if (rankOrder == null)
			setRankOrder();
		int ii=-1;
		int tmpIndex = ((LabelAlphabet)dictionary).lookupIndex(label.entry);
		// Now find this index in the ordered list with a linear search
		for(ii=0; ii<rankOrder.length ; ii++) {
			if (rankOrder[ii] == tmpIndex)
				break;
		}
		// CPAL if ii == -1 we have a problem
		// NOTE(review): on a miss the loop actually exits with
		// ii == rankOrder.length (never -1), i.e. an out-of-range rank is
		// returned — callers should treat that as "not found".
		return ii;
	}

	/** Rank of the label with the given alphabet index. */
	public int getRank (int labelIndex)
	{
		return getRank(((LabelAlphabet)dictionary).lookupLabel(labelIndex));
	}

	/** The label occupying the given rank (0 = best). */
	public Label getLabelAtRank (int rank)
	{
		if (rankOrder == null)
			setRankOrder ();
		return ((LabelAlphabet)dictionary).lookupLabel (rankOrder[rank]);
	}

	/** The value of the label occupying the given rank. */
	public double getValueAtRank (int rank)
	{
		if (rankOrder == null)
			setRankOrder ();
		return values[rankOrder[rank]];
	}

	public LabelVector toLabelVector ()
	{
		return this;
	}

	// Inherited from FeatureVector or SparseVector
	// public void addTo (double[] values)
	// public void addTo (double[] values, double scale)
	// public int numLocations ();
	// public double valueAtLocation (int loc)

}
3,278
21.93007
91
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/FeatureConjunction.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept. This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit). http://www.cs.umass.edu/~mccallum/mallet This software is provided under the terms of the Common Public License, version 1.0, as published by http://www.opensource.org. For further information, see the file `LICENSE' included with this distribution. */ /** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */ package cc.mallet.types; import java.util.logging.*; import java.util.ArrayList; import java.util.Arrays; import java.util.regex.*; import java.io.*; import cc.mallet.types.*; import cc.mallet.util.MalletLogger; public class FeatureConjunction implements Serializable { private static Logger logger = MalletLogger.getLogger(FeatureConjunction.class.getName()); static private final String conjunctionString = "_&_"; static private final String negationString = "!"; static private final Pattern conjunctionPattern = Pattern.compile (conjunctionString); String name; Alphabet dictionary; int[] features; boolean[] negations; // true here means Feature must be present int index = -1; // -1 if this conjunction isn't yet part of Alphabet /** If negations[i] is true, insist that the feature has non-zero value; if false, insist that it has zero value. Note: Does not check to make sure that it hasn't already been added. If negations[] is null, then assume all negations[i] are true. 
*/ public FeatureConjunction (String name, Alphabet dictionary, int[] features, boolean[] negations, boolean checkSorted, boolean copyFeatures, boolean copyNegations) { assert (negations == null || features.length == negations.length); this.dictionary = dictionary; if (copyFeatures) { this.features = new int[features.length]; System.arraycopy (features, 0, this.features, 0, features.length); } else { this.features = features; } if (copyNegations && negations != null) { this.negations = new boolean[negations.length]; System.arraycopy (negations, 0, this.negations, 0, negations.length); } else { this.negations = negations; } if (checkSorted) { for (int i = this.features.length-1; i >= 0; i--) { boolean swapped = false; for (int j = 0; j < i; j++) { if (features[i-1] > features[i]) { int tmpf = this.features[i]; this.features[i] = this.features[i-1]; this.features[i-1] = tmpf; if (negations != null) { boolean tmpb = this.negations[i]; this.negations[i] = this.negations[i-1]; this.negations[i-1] = tmpb; } swapped = true; } else if (features[i-1] == features[i]) throw new IllegalArgumentException ("Same Feature cannot occur twice."); } if (!swapped) break; } } if (name != null) this.name = name; else { StringBuffer sb = new StringBuffer(); for (int i = 0; i < this.features.length; i++) { if (negations != null && this.negations[i] == false) sb.append(negationString); if (i > 0) sb.append(conjunctionString); sb.append (dictionary.lookupObject(features[i]).toString()); } // Shouldn't sb.toString() be saved in this.name here? 
-akm 1/08 } } public FeatureConjunction (String name, Alphabet dictionary, int[] features, boolean[] negations, boolean checkSorted) { this (name, dictionary, features, negations, checkSorted, true, true); } public FeatureConjunction (String name, Alphabet dictionary, int[] features, boolean[] negations) { this (name, dictionary, features, negations, true); } public static boolean isValidConjunction (int[] features) { for (int i = 1; i < features.length; i++) if (features[i-1] >= features[i]) return false; return true; } // Always in "Alphabet index" order // xxx This one doesn't check for duplicates among sub-constituents in the conjunction, as // the next method does. public static String getName (Alphabet dictionary, int[] features, boolean[] negations) { if (true) { if (negations != null) for (int i = 0; i < negations.length; i++) if (negations[i]) throw new UnsupportedOperationException ("Doesn't yet check for sub-duplicates with negations."); return getName (dictionary, features); } // Split apart any feature[i] that is itself a conjunction feature //int[] featureIndices = getFeatureIndices (dictionary, dictionary.lookupObject( // xxx Add code here to do the sorting... 
// Make sure the the features area sorted for (int i = 1; i < features.length; i++) if (features[i-1] >= features[i]) throw new IllegalArgumentException ("feature index not sorted, or contains duplicate"); StringBuffer sb = new StringBuffer (); for (int i = 0; i < features.length; i++) { if (i > 0) sb.append (conjunctionString); if (negations != null && negations[i]) sb.append (negationString); sb.append (dictionary.lookupObject(features[i]).toString()); } return sb.toString(); } // Always in "Alphabet index" order public static String getName (Alphabet dictionary, int[] features) { // Split apart any feature[i] that is itself a conjunction feature for (int i = 0; i < features.length; i++) { int[] featureIndices = getFeatureIndices (dictionary, (String) dictionary.lookupObject(features[i])); if (featureIndices.length > 1) { int newLength = features.length-1+featureIndices.length; int[] newFeatures = new int[newLength]; int n = 0; for (int j = 0; j < i; j++) newFeatures[n++] = features[j]; for (int j = 0; j < featureIndices.length; j++) newFeatures[n++] = featureIndices[j]; for (int j = i+1; j < features.length; j++) newFeatures[n++] = features[j]; Arrays.sort (newFeatures); return getName (dictionary, newFeatures); } } // xxx Add code here to do the sorting... 
// Make sure the the features area sorted, and remove any duplicates for (int i = 1; i < features.length; i++) { if (features[i-1] == features[i]) { // Remove duplicate and try again int[] newFeatures = new int[features.length-1]; int n = 0; for (int j = 0; j < i; j++) newFeatures[n++] = features[j]; for (int j = i+1; j < features.length; j++) newFeatures[n++] = features[j]; return getName (dictionary, newFeatures); } if (features[i-1] > features[i]) throw new IllegalArgumentException ("feature indices not sorted"); } StringBuffer sb = new StringBuffer (); for (int i = 0; i < features.length; i++) { if (i > 0) sb.append (conjunctionString); sb.append (dictionary.lookupObject(features[i]).toString()); } return sb.toString(); } public static boolean featuresOverlap (Alphabet dictionary, int feature1, int feature2) { if (feature1 == feature2) return true; int[] fis1 = getFeatureIndices (dictionary, (String)dictionary.lookupObject(feature1)); int[] fis2 = getFeatureIndices (dictionary, (String)dictionary.lookupObject(feature2)); for (int i = 0, j = 0; i < fis1.length; i++) { assert (i >= fis1.length-2 || fis1[i] < fis1[i+1]); assert (j >= fis2.length-2 || fis2[j] < fis2[j+1]); while (fis2[j] < fis1[i] && j < fis2.length-1) j++; if (fis1[i] == fis2[j]) return true; } return false; } // Always in "Alphabet index" order public static String getName (Alphabet dictionary, int feature1, int feature2) { if (feature1 < feature2) return getName (dictionary, new int[] {feature1, feature2}); else return getName (dictionary, new int[] {feature2, feature1}); //assert (feature1 != feature2); //String string1 = dictionary.lookupObject(feature1).toString(); //String string2 = dictionary.lookupObject(feature2).toString(); //if (feature1 < feature2) //return string1 + conjunctionString + string2; //else //return string2 + conjunctionString + string1; } public static int[] getFeatureIndices (Alphabet dictionary, String featureConjunctionName) { String[] featureNames = 
conjunctionPattern.split (featureConjunctionName); int[] ret = new int[featureNames.length]; for (int i = 0; i < featureNames.length; i++) { assert (!featureNames[i].startsWith(negationString)); ret[i] = dictionary.lookupIndex (featureNames[i], false); logger.fine(i + "th feature: " +featureNames[i]+" in "+featureConjunctionName); assert (ret[i] != -1) : "Couldn't find index for " + i + "th feature: " +featureNames[i]+" in "+featureConjunctionName; } java.util.Arrays.sort (ret); return ret; } public FeatureConjunction (Alphabet dictionary, int[] features, boolean[] negations) { this (getName (dictionary, features, negations), dictionary, features, negations, true); } private static boolean[] trueArray (int length) { boolean[] ret = new boolean[length]; for (int i = 0; i < length; i++) ret[i] = true; return ret; } public FeatureConjunction (Alphabet dictionary, int[] features) { this (getName (dictionary, features, null), dictionary, features, null, true, true, false); } public boolean satisfiedBy (FeatureVector fv) { if (fv.getAlphabet() != dictionary) throw new IllegalArgumentException ("Vocabularies do not match."); int fvsize = fv.numLocations(); int fvl = 0; for (int fcl = 0; fcl < features.length; fcl++) { int fcli = features[fcl]; while (fvl < fvsize && fv.indexAtLocation(fvl) < fcli) fvl++; if (fvl < fvsize && fv.indexAtLocation(fvl) == fcli && fv.valueAtLocation(fvl) != 0) { // The fcli'th Feature of the FeatureConjunction is present in the FeatureVector if (negations != null && negations[fcl] == false) // but this Feature was negated in the FeatureConjunction, so not satisfied return false; } else if (negations == null || negations[fcl] == true) // The fcli'th Feature of the FeatureConjunction is not present in the FeatureVector // and this Feature was unnegated in the FeatureConjunction, so not satisfied return false; } return true; } public int getIndex () { return index; } public void addTo (AugmentableFeatureVector fv, double value, FeatureSelection 
fs) { // xxx This could be simplified for the special case of a FeatureConjunction with only one conjunct if (this.satisfiedBy (fv)) { index = fv.getAlphabet().lookupIndex (name); // Make sure that this feature is selected if (fs != null) fs.add (index); if (index >= 0 && fv.value(index) > 0) // Don't add features that are already there return; assert (index != -1); fv.add (index, value); } } public void addTo (AugmentableFeatureVector fv, double value) { addTo (fv, value, null); } public void addTo (AugmentableFeatureVector fv) { this.addTo (fv, 1.0); } // Serialization private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private static final int NULL_INTEGER = -1; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); out.writeObject(name); out.writeObject(dictionary); if (features == null) out.writeInt(NULL_INTEGER); else { out.writeInt(features.length); for (int i = 0; i < features.length; i++) { out.writeInt(features[i]); } } if (negations == null) out.writeInt(NULL_INTEGER); else { out.writeInt(negations.length); for (int i = 0; i < negations.length; i++) { out.writeBoolean(negations[i]); } } out.writeInt(index); } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); name = (String)in.readObject(); dictionary = (Alphabet)in.readObject(); int size = in.readInt (); if (size == NULL_INTEGER) features = null; else { features = new int[size]; for (int i = 0; i < size; i++) { features[i] = in.readInt(); } } size = in.readInt (); if (size == NULL_INTEGER) negations = null; else { negations = new boolean[size]; for (int i = 0; i < size; i++) { negations[i] = in.readBoolean(); } } index = in.readInt(); } public static class List implements Serializable { ArrayList conjunctions; public List () { this.conjunctions = new ArrayList(); } public int size () { return conjunctions.size(); } public 
FeatureConjunction get (int i) { return (FeatureConjunction) conjunctions.get(i); } public void add (FeatureConjunction fc) { if (conjunctions.size() > 0 && fc.dictionary != ((FeatureConjunction)conjunctions.get(0)).dictionary) throw new IllegalArgumentException ("Alphabet does not match."); conjunctions.add (fc); } public void addTo (AugmentableFeatureVector fv, double value, FeatureSelection fs) { // xxx Make this more efficient for (int i = 0; i < conjunctions.size(); i++) ((FeatureConjunction)conjunctions.get(i)).addTo (fv, value, fs); } public void addTo (AugmentableFeatureVector fv, double value) { addTo (fv, value, null); } private static final long serialVersionUID = 1; private static final int CURRENT_SERIAL_VERSION = 0; private static final int NULL_INTEGER = -1; private void writeObject (ObjectOutputStream out) throws IOException { out.writeInt (CURRENT_SERIAL_VERSION); if (conjunctions == null) out.writeInt(NULL_INTEGER); else { out.writeInt(conjunctions.size()); for (int i = 0; i < conjunctions.size(); i++) { out.writeObject(conjunctions.get(i)); } } } private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException { int version = in.readInt (); int size = in.readInt (); if (size == NULL_INTEGER) conjunctions = null; else { conjunctions = new ArrayList(); for (int i = 0; i < size; i++) { conjunctions.add((FeatureConjunction)in.readObject()); } } } } }
14,066
31.5625
122
java
twitter_nlp
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/ConstantMatrix.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
   This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
   http://www.cs.umass.edu/~mccallum/mallet
   This software is provided under the terms of the Common Public License,
   version 1.0, as published by http://www.opensource.org.  For further
   information, see the file `LICENSE' included with this distribution. */

/** @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a> */

package cc.mallet.types;

/**
 * Read-only view of a multi-dimensional matrix.  Entries can be addressed
 * three ways: by a full multi-dimensional index array, by a flattened
 * "single index", or by "location" (position within the — possibly sparse —
 * list of stored entries).
 */
public interface ConstantMatrix {
  /** Returns the number of dimensions (the rank) of this matrix. */
  public int getNumDimensions ();
  /** Fills {@code sizes} with the size of each dimension; return value is the
   *  number of dimensions filled in. */
  public int getDimensions (int[] sizes);
  /** Returns the entry at the given multi-dimensional index. */
  public double value (int[] indices);
  // Access using a single index, efficient for dense matrices, but not sparse
  // Move to DenseMatrix?
  /** Flattens a multi-dimensional index into a single index. */
  public int singleIndex (int[] indices);
  /** Expands single index {@code i} into {@code indices} (inverse of singleIndex). */
  public void singleToIndices (int i, int[] indices);
  /** Returns the entry at flattened index {@code i}. */
  public double singleValue (int i);
  /** Returns the total number of addressable entries (product of dimension sizes). */
  public int singleSize ();
  // Access by index into sparse array, efficient for sparse and dense matrices
  /** Returns the number of stored entries (may be less than singleSize() when sparse). */
  public int numLocations ();
  /** Returns the location holding the given single index. */
  public int location (int index);
  /** Returns the value stored at the given location. */
  public double valueAtLocation (int location);
  // Returns a "singleIndex"
  public int indexAtLocation (int location);
  /** Returns the dot product of this matrix with {@code m}, treated as flat vectors. */
  public double dotProduct (ConstantMatrix m);
  /** Returns the sum of absolute values of all entries. */
  public double absNorm ();
  /** Returns the one-norm of the entries. */
  public double oneNorm ();
  /** Returns the Euclidean (two-) norm of the entries. */
  public double twoNorm ();
  /** Returns the maximum absolute entry value. */
  public double infinityNorm ();
  /** Prints a human-readable rendering of the matrix (destination unspecified here). */
  public void print();
  /** Returns true if any entry is NaN. */
  public boolean isNaN();
  /** Returns a copy of this matrix. */
  public ConstantMatrix cloneMatrix ();
}
1,470
29.020408
91
java