repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/FeatureVectorSequence.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
public class FeatureVectorSequence implements Sequence<FeatureVector>, Serializable, AlphabetCarrying
{
FeatureVector[] sequence;
Alphabet alphabet;
public FeatureVectorSequence (FeatureVector[] featureVectors)
{
this.sequence = featureVectors;
this.alphabet = featureVectors[0].getAlphabet();
}
public FeatureVectorSequence (Alphabet dict,
TokenSequence tokens,
boolean binary,
boolean augmentable,
boolean growAlphabet)
{
this.alphabet = dict;
this.sequence = new FeatureVector[tokens.size()];
if (augmentable)
for (int i = 0; i < tokens.size(); i++)
sequence[i] = new AugmentableFeatureVector (dict, tokens.get(i).getFeatures(), binary, growAlphabet);
else
for (int i = 0; i < tokens.size(); i++)
sequence[i] = new FeatureVector (dict, tokens.get(i).getFeatures(), binary, growAlphabet);
}
public FeatureVectorSequence (Alphabet dict,
TokenSequence tokens,
boolean binary,
boolean augmentable)
{
this(dict, tokens, binary, augmentable, true);
}
public FeatureVectorSequence (Alphabet dict,
TokenSequence tokens)
{
this (dict, tokens, false, false);
}
public Alphabet getAlphabet() {
return alphabet;
}
public Alphabet[] getAlphabets()
{
return new Alphabet[] {getAlphabet()};
}
public int size ()
{
return sequence.length;
}
public FeatureVector get (int i)
{
return sequence[i];
}
public FeatureVector getFeatureVector (int i)
{
return sequence [i];
}
public double dotProduct (int sequencePosition,
Matrix2 weights,
int weightRowIndex)
{
return weights.rowDotProduct (weightRowIndex, sequence[sequencePosition]);
}
public double dotProduct (int sequencePosition, Vector weights)
{
return weights.dotProduct (sequence[sequencePosition]);
}
/** An iterator over the FeatureVectors in the sequence. */
public class Iterator implements java.util.Iterator<FeatureVector>
{
int pos;
public Iterator () {
pos = 0;
}
public FeatureVector next() {
return sequence[pos++];
}
public int getIndex () {
return pos;
}
public boolean hasNext() {
return pos < sequence.length;
}
public void remove () {
throw new UnsupportedOperationException ();
}
}
public Iterator iterator ()
{
return new Iterator();
}
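/* Usage sketch (not part of the original MALLET source; "dict" and "tokens" are
   assumed to be an existing Alphabet and TokenSequence). It shows how a
   FeatureVectorSequence is typically built and traversed:

     FeatureVectorSequence fvs = new FeatureVectorSequence (dict, tokens);
     for (int i = 0; i < fvs.size(); i++) {
       FeatureVector fv = fvs.get(i);   // one FeatureVector per token
       // ... use fv, e.g. fv.numLocations()
     }
*/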
public String toString ()
{
StringBuffer sb = new StringBuffer ();
sb.append (super.toString());
sb.append ('\n');
for (int i = 0; i < sequence.length; i++) {
sb.append (Integer.toString(i)+": ");
//sb.append (sequence[i].getClass().getName()); sb.append (' ');
sb.append (sequence[i].toString(true));
sb.append ('\n');
}
return sb.toString();
}
// Serialization of Instance
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject(alphabet);
out.writeObject(sequence);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
@SuppressWarnings("unused")
int version = in.readInt ();
this.alphabet = (Alphabet) in.readObject();
this.sequence = (FeatureVector[]) in.readObject();
}
}
| 3,885 | 23.136646 | 105 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Labeling.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import cc.mallet.types.Label;
/** A distribution over possible labels for an instance. */
public interface Labeling extends AlphabetCarrying
{
public LabelAlphabet getLabelAlphabet ();
public Label getBestLabel ();
public double getBestValue ();
public int getBestIndex ();
public double value (Label label);
public double value (int labelIndex);
// Zero-based
public int getRank (Label label);
public int getRank (int labelIndex);
public Label getLabelAtRank (int rank);
public double getValueAtRank (int rank);
public void addTo (double[] values);
public void addTo (double[] values, double scale);
// The number of non-zero-weight Labels in this Labeling, not total
// number in the Alphabet
public int numLocations ();
// xxx Use "get..."?
public int indexAtLocation (int pos);
public Label labelAtLocation (int pos);
public double valueAtLocation (int pos);
public LabelVector toLabelVector();
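/* Consumption sketch (illustrative only, not part of the original interface file;
   "labeling" is an assumed Labeling instance). A Labeling is typically read either
   through its best label or by iterating over its non-zero locations:

     Label best = labeling.getBestLabel();
     for (int loc = 0; loc < labeling.numLocations(); loc++)
       System.out.println (labeling.labelAtLocation(loc) + " = " + labeling.valueAtLocation(loc));
*/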
}
| 1,483 | 27.538462 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/RankedFeatureVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
A FeatureVector for which you can efficiently get the feature with
highest value, and other ranks.
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.PrintStream;
import java.io.OutputStream;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.Label;
public class RankedFeatureVector extends FeatureVector
{
int[] rankOrder;
private static final int SORTINIT = -1;
int sortedTo = SORTINIT; /* Extent of latest sort */
public RankedFeatureVector (Alphabet dict,
int[] indices,
double[] values)
{
super (dict, indices, values);
}
public RankedFeatureVector (Alphabet dict, double[] values)
{
super (dict, values);
}
private static double[] subArray (double[] a, int begin, int length)
{
double[] ret = new double[length];
System.arraycopy(a, begin, ret, 0, length);
return ret;
}
public RankedFeatureVector (Alphabet dict, double[] values, int begin, int length)
{
super (dict, subArray(values, begin, length));
}
public RankedFeatureVector (Alphabet dict, DenseVector v)
{
this (dict, v.values);
}
public RankedFeatureVector (Alphabet dict, AugmentableFeatureVector v)
{
super (dict, v.indices, v.values, v.size, v.size,
true, true, true);
}
public RankedFeatureVector (Alphabet dict, SparseVector v)
{
super (dict, v.indices, v.values);
}
// xxx This bubble sort is a major inefficiency.
// Implement an O(n log n) method!
// No longer used!
protected void setRankOrder ()
{
this.rankOrder = new int[values.length];
for (int i = 0; i < rankOrder.length; i++) {
rankOrder[i] = i;
assert (!Double.isNaN(values[i]));
}
// BubbleSort from back
for (int i = rankOrder.length-1; i >= 0; i--) {
//if (i % 1000 == 0)
//System.out.println ("RankedFeatureVector.setRankOrder i="+i);
boolean swapped = false;
for (int j = 0; j < i; j++)
if (values[rankOrder[j]] < values[rankOrder[j+1]]) {
// swap
int r = rankOrder[j];
rankOrder[j] = rankOrder[j+1];
rankOrder[j+1] = r;
}
}
}
protected void setRankOrder (int extent, boolean reset)
{
int sortExtent;
// Set the number of cells to sort, making sure we don't go past the max.
// Since we are using selection sort, sorting the first n-1 positions sorts the whole array.
sortExtent = (extent >= values.length) ? values.length - 1: extent;
if (sortedTo == SORTINIT || reset) { // reinitialize and sort
this.rankOrder = new int[values.length];
for (int i = 0; i < rankOrder.length; i++) {
rankOrder[i] = i;
assert (!Double.isNaN(values[i]));
}
}
// Selection sort
for (int i = sortedTo+1; i <= sortExtent; i++) {
double max = values[rankOrder[i]];
int maxIndex = i;
for(int j = i+1; j < rankOrder.length; j++) {
if (values[rankOrder[j]] > max) {
max = values[rankOrder[j]];
maxIndex = j;
}
}
//swap
int r = rankOrder[maxIndex];
rankOrder[maxIndex] = rankOrder[i];
rankOrder[i] = r;
sortedTo = i;
}
}
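/* Illustrative note (added for clarity, not in the original source): the sort above
   is incremental, so only ranks 0..extent are guaranteed correct after a call.
   For example, with values = {0.2, 0.9, 0.5} and a fresh rankOrder,
   setRankOrder(0) only establishes rankOrder[0] == 1 (the index of 0.9);
   setRankOrder(1) additionally fixes rankOrder[1] == 2 (the index of 0.5), and so on.
   Callers such as getIndexAtRank(rank) therefore call setRankOrder(rank) before reading. */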
// added by Limin Yao: ranks the elements in ascending order, smallest value first
protected void setReverseRankOrder (int extent, boolean reset)
{
int sortExtent;
// Set the number of cells to sort, making sure we don't go past the max.
// Since we are using selection sort, sorting the first n-1 positions sorts the whole array.
sortExtent = (extent >= values.length) ? values.length - 1: extent;
if (sortedTo == SORTINIT || reset) { // reinitialize and sort
this.rankOrder = new int[values.length];
for (int i = 0; i < rankOrder.length; i++) {
rankOrder[i] = i;
assert (!Double.isNaN(values[i]));
}
}
// Selection sort
for (int i = sortedTo+1; i <= sortExtent; i++) {
double min = values[rankOrder[i]];
int minIndex = i;
for(int j = i+1; j < rankOrder.length; j++) {
if (values[rankOrder[j]] < min) {
min = values[rankOrder[j]];
minIndex = j;
}
}
//swap
int r = rankOrder[minIndex];
rankOrder[minIndex] = rankOrder[i];
rankOrder[i] = r;
sortedTo = i;
}
}
protected void setRankOrder (int extent) {
setRankOrder(extent, false);
}
public int getMaxValuedIndex ()
{
if (rankOrder == null)
setRankOrder (0);
return getIndexAtRank(0); // was return rankOrder[0];
}
public Object getMaxValuedObject ()
{
return dictionary.lookupObject (getMaxValuedIndex());
}
public int getMaxValuedIndexIn (FeatureSelection fs)
{
if (fs == null)
return getMaxValuedIndex();
assert (fs.getAlphabet() == dictionary);
// xxx Make this more efficient! I'm pretty sure that Java BitSets can do this more efficiently
int i = 0;
while (!fs.contains(rankOrder[i])) {
setRankOrder (i);
i++;
}
//System.out.println ("RankedFeatureVector.getMaxValuedIndexIn feature="
//+dictionary.lookupObject(rankOrder[i]));
return getIndexAtRank(i); // was return rankOrder[i]
}
public Object getMaxValuedObjectIn (FeatureSelection fs)
{
return dictionary.lookupObject (getMaxValuedIndexIn(fs));
}
public double getMaxValue ()
{
if (rankOrder == null)
setRankOrder (0);
return values[rankOrder[0]];
}
public double getMaxValueIn (FeatureSelection fs)
{
if (fs == null)
return getMaxValue();
int i = 0;
while (!fs.contains(i)) {
setRankOrder (i);
i++;
}
return values[rankOrder[i]];
}
public int getIndexAtRank (int rank)
{
setRankOrder (rank);
return indexAtLocation(rankOrder[rank]); // was return rankOrder[rank]
}
public Object getObjectAtRank (int rank)
{
setRankOrder (rank);
return dictionary.lookupObject (getIndexAtRank(rank)); // was return dictionary.lookupObject (rankOrder[rank]);
}
public double getValueAtRank (int rank)
{
if (values == null)
return 1.0;
setRankOrder (rank);
if (rank >= rankOrder.length) {
System.err.println("rank larger than rankOrder.length: rank = " + rank + ", rankOrder.length = " + rankOrder.length);
rank = rankOrder.length - 1;
}
if (rankOrder[rank] >= values.length) {
System.err.println("rankOrder[rank] out of range.");
return 1.0;
}
return values[rankOrder[rank]];
}
/**
* Prints a human-readable version of this vector, with features listed in ranked order.
* @param out Stream to write to
*/
public void printByRank (OutputStream out)
{
printByRank(new PrintWriter (new OutputStreamWriter (out), true));
}
/**
* Prints a human-readable version of this vector, with features listed in ranked order.
* @param out Writer to write to
*/
public void printByRank (PrintWriter out)
{
for (int rank = 0; rank < numLocations (); rank++) {
int idx = getIndexAtRank (rank);
double val = getValueAtRank (rank);
Object obj = dictionary.lookupObject (idx);
out.print (obj+":"+val + " ");
}
}
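/* Usage sketch (illustrative only, not part of the original source; "alphabet" and
   "scores" are assumed to be an existing Alphabet and a double[] of per-feature
   scores aligned with it):

     RankedFeatureVector ranked = new RankedFeatureVector (alphabet, scores);
     Object bestFeature = ranked.getObjectAtRank (0);  // highest-scoring feature
     double bestScore = ranked.getValueAtRank (0);
     ranked.printByRank (new PrintWriter (new OutputStreamWriter (System.out), true));
*/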
//added by Limin Yao
public void printTopK (PrintWriter out, int num)
{
int length = numLocations();
if(num>length)
num=length;
for (int rank = 0; rank < num; rank++) {
int idx = getIndexAtRank (rank);
double val = getValueAtRank (rank);
Object obj = dictionary.lookupObject (idx);
out.print (obj+":"+val + " ");
}
}
public void printLowerK (PrintWriter out, int num)
{
int length = numLocations();
assert(num < length);
for (int rank = length-num ; rank < length; rank++) {
int idx = getIndexAtRank (rank);
double val = getValueAtRank (rank);
Object obj = dictionary.lookupObject (idx);
out.print (obj+":"+val + " ");
}
}
public int getRank (Object o)
{
throw new UnsupportedOperationException ("Not yet implemented");
}
public int getRank (int index)
{
throw new UnsupportedOperationException ("Not yet implemented");
}
public void set (int i, double v)
{
throw new UnsupportedOperationException (RankedFeatureVector.class.getName() + " is immutable");
}
public interface Factory
{
public RankedFeatureVector newRankedFeatureVector (InstanceList ilist);
}
public interface PerLabelFactory
{
public RankedFeatureVector[] newRankedFeatureVectors (InstanceList ilist);
}
}
| 8,633 | 25.323171 | 118 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/FeatureSelector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
Given an arbitrary scheme for ranking features, sets the feature selection of
an InstanceList.
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.logging.*;
import cc.mallet.util.MalletLogger;
public class FeatureSelector
{
private static Logger logger = MalletLogger.getLogger(FeatureSelector.class.getName());
// Only one of the following two will be non-null
RankedFeatureVector.Factory ranker;
RankedFeatureVector.PerLabelFactory perLabelRanker;
// Only one of the following two will be changed
int numFeatures = -1;
double minThreshold = Double.POSITIVE_INFINITY;
public FeatureSelector (RankedFeatureVector.Factory ranker,
int numFeatures)
{
this.ranker = ranker;
this.numFeatures = numFeatures;
}
public FeatureSelector (RankedFeatureVector.Factory ranker,
double minThreshold)
{
this.ranker = ranker;
this.minThreshold = minThreshold;
}
public FeatureSelector (RankedFeatureVector.PerLabelFactory perLabelRanker,
int numFeatures)
{
this.perLabelRanker = perLabelRanker;
this.numFeatures = numFeatures;
}
public FeatureSelector (RankedFeatureVector.PerLabelFactory perLabelRanker,
double minThreshold)
{
this.perLabelRanker = perLabelRanker;
this.minThreshold = minThreshold;
}
public void selectFeaturesFor (InstanceList ilist)
{
if (perLabelRanker != null)
selectFeaturesForPerLabel (ilist);
else
selectFeaturesForAllLabels (ilist);
}
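/* Usage sketch (illustrative only; "ilist" is assumed to be an existing InstanceList
   with FeatureVector data). It keeps the 100 highest-count features per label, using
   the PerLabelFeatureCounts factory defined elsewhere in this package:

     FeatureSelector selector =
       new FeatureSelector (new PerLabelFeatureCounts.Factory(), 100);
     selector.selectFeaturesFor (ilist);  // installs a per-label FeatureSelection on ilist
*/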
public void selectFeaturesForAllLabels (InstanceList ilist)
{
RankedFeatureVector ranking = ranker.newRankedFeatureVector (ilist);
FeatureSelection fs = new FeatureSelection (ilist.getDataAlphabet());
if (numFeatures != -1) { // Select by number of features.
int nf = Math.min (numFeatures, ranking.singleSize());
for (int i = 0; i < nf; i++) {
logger.info ("adding feature "+i+" word="+ilist.getDataAlphabet().lookupObject(ranking.getIndexAtRank(i)));
fs.add (ranking.getIndexAtRank(i));
}
} else { // Select by threshold.
for (int i = 0; i < ranking.singleSize(); i++) {
if (ranking.getValueAtRank(i) > minThreshold)
fs.add (ranking.getIndexAtRank(i));
}
}
logger.info("Selected " + fs.cardinality() + " features from " +
ilist.getDataAlphabet().size() + " features");
ilist.setPerLabelFeatureSelection (null);
ilist.setFeatureSelection (fs);
}
public void selectFeaturesForPerLabel (InstanceList ilist)
{
RankedFeatureVector[] rankings = perLabelRanker.newRankedFeatureVectors (ilist);
int numClasses = rankings.length;
FeatureSelection[] fs = new FeatureSelection[numClasses];
for (int i = 0; i < numClasses; i++) {
fs[i] = new FeatureSelection (ilist.getDataAlphabet());
RankedFeatureVector ranking = rankings[i];
int nf = Math.min (numFeatures, ranking.singleSize());
if (nf >= 0) {
for (int j = 0; j < nf; j++)
fs[i].add (ranking.getIndexAtRank(j));
} else {
for (int j = 0; j < ranking.singleSize(); j++) {
if (ranking.getValueAtRank(j) > minThreshold)
fs[i].add (ranking.getIndexAtRank(j));
else
break;
}
}
}
ilist.setFeatureSelection (null);
ilist.setPerLabelFeatureSelection (fs);
}
}
| 3,708 | 29.908333 | 111 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/IndexedSparseVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
A sparse vector whose present values can be changed. You can't, however, add
values at indices that were missing (and therefore implicitly zero).
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Arrays;
import java.util.logging.*;
import java.io.*;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.Vector;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.PropertyList;
public class IndexedSparseVector extends SparseVector implements Serializable
{
private static Logger logger = MalletLogger.getLogger(SparseVector.class.getName());
transient private int[] index2location;
public IndexedSparseVector (int[] indices, double[] values,
int capacity, int size,
boolean copy,
boolean checkIndicesSorted,
boolean removeDuplicates)
{
super (indices, values, capacity, size, copy, checkIndicesSorted, removeDuplicates);
assert (indices != null);
}
/** Create an empty vector */
public IndexedSparseVector ()
{
super (new int[0], new double[0], 0, 0, false, false, false);
}
/** Create a non-binary vector: dense if "featureIndices" is null, sparse otherwise */
public IndexedSparseVector (int[] featureIndices,
double[] values)
{
super (featureIndices, values);
}
/** Create binary vector */
public IndexedSparseVector (int[] featureIndices)
{
super (featureIndices);
}
// xxx We need to implement this in FeatureVector subclasses
public ConstantMatrix cloneMatrix ()
{
return new IndexedSparseVector (indices, values);
}
public ConstantMatrix cloneMatrixZeroed () {
assert (values != null);
int[] newIndices = new int[indices.length];
System.arraycopy (indices, 0, newIndices, 0, indices.length);
IndexedSparseVector sv = new IndexedSparseVector
(newIndices, new double[values.length],
values.length, values.length, false, false, false);
// Share the index2location array. This will be unsafe if
// IndexedSparseVectors are ever allowed to be modifiable, but I
// don't think that this will be the case.
if (index2location != null)
sv.index2location = index2location;
return sv;
}
// Methods that change values
public void indexVector ()
{
if ((index2location == null) && (indices.length > 0))
setIndex2Location ();
}
private void setIndex2Location ()
{
//System.out.println ("IndexedSparseVector setIndex2Location indices.length="+indices.length+" maxindex="+indices[indices.length-1]);
assert (indices != null);
assert (index2location == null);
int size;
if (indices.length == 0)
size = 0;
else size = indices[indices.length-1]+1;
assert (size >= indices.length);
this.index2location = new int[size];
Arrays.fill (index2location, -1);
for (int i = 0; i < indices.length; i++)
index2location[indices[i]] = i;
}
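/* Worked example (added for illustration, not in the original source): if
   indices = {2, 5, 9}, then size = 9 + 1 = 10 and
   index2location = {-1, -1, 0, -1, -1, 1, -1, -1, -1, 2},
   so location(5) == 1 and setValue(5, x) writes values[1], while any index not
   present in "indices" maps to -1 and is rejected. */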
public final void setValue (int index, double value) {
if (index2location == null)
setIndex2Location ();
int location = index < index2location.length ? index2location[index] : -1;
if (location >= 0)
values[location] = value;
else
throw new IllegalArgumentException ("Trying to set value that isn't present in IndexedSparseVector");
}
public final void setValueAtLocation (int location, double value)
{
values[location] = value;
}
// I dislike this name, but it's consistent with DenseVector. -cas
public void columnPlusEquals (int index, double value) {
if (index2location == null)
setIndex2Location ();
int location = index < index2location.length ? index2location[index] : -1;
if (location >= 0)
values[location] += value;
else
throw new IllegalArgumentException ("Trying to set value that isn't present in IndexedSparseVector");
}
public final double dotProduct (DenseVector v) {
double ret = 0;
if (values == null)
for (int i = 0; i < indices.length; i++)
ret += v.value(indices[i]);
else
for (int i = 0; i < indices.length; i++)
ret += values[i] * v.value(indices[i]);
return ret;
}
public final double dotProduct (SparseVector v)
{
if (indices.length == 0)
return 0;
if (index2location == null)
setIndex2Location ();
double ret = 0;
int vNumLocs = v.numLocations ();
if (isBinary ()) {
// this vector is binary
for (int i = 0; i < vNumLocs; i++) {
int index = v.indexAtLocation(i);
if (index >= index2location.length)
break;
if (index2location [index] >= 0)
ret += v.valueAtLocation (i);
}
} else if (v.isBinary ()) {
// the other vector is binary
for (int i = 0; i < vNumLocs; i++) {
int index = v.indexAtLocation(i);
if (index >= index2location.length)
break;
int location = index2location[index];
if (location >= 0)
ret += values[location];
}
} else {
for (int i = 0; i < vNumLocs; i++) {
int index = v.indexAtLocation(i);
if (index >= index2location.length)
break;
int location = index2location[index];
if (location >= 0)
ret += values[location] * v.valueAtLocation (i);
}
}
return ret;
}
public final void plusEqualsSparse (SparseVector v, double factor)
{
if (indices.length == 0)
return;
if (index2location == null)
setIndex2Location ();
for (int i = 0; i < v.numLocations(); i++) {
int index = v.indexAtLocation(i);
if (index >= index2location.length)
break;
int location = index2location[index];
if (location >= 0)
values[location] += v.valueAtLocation (i) * factor;
}
}
public final void plusEqualsSparse (SparseVector v)
{
if (indices.length == 0)
return;
if (index2location == null)
setIndex2Location ();
for (int i = 0; i < v.numLocations(); i++) {
int index = v.indexAtLocation(i);
if (index >= index2location.length)
break;
int location = index2location[index];
if (location >= 0)
values[location] += v.valueAtLocation (i);
}
}
public final void setAll (double v)
{
for (int i = 0; i < values.length; i++)
values[i] = v;
}
public int location (int index)
{
// No test for indices == null, for this is not allowed in an IndexedSparseVector
if (index2location == null)
setIndex2Location ();
if (index >= index2location.length)
return -1;
return index2location [index];
}
//Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException
{
// index2location is considered transient to save disk space
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in)
throws IOException, ClassNotFoundException
{
int version = in.readInt ();
}
}
| 7,359 | 27.307692 | 135 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Matrix.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
public interface Matrix extends ConstantMatrix
{
public void setValue (int[] indices, double value);
public void setSingleValue (int i, double value);
public void incrementSingleValue (int i, double delta);
public void setValueAtLocation (int loc, double value);
public void setAll (double v);
public void set (ConstantMatrix m);
public void setWithAddend (ConstantMatrix m, double addend);
public void setWithFactor (ConstantMatrix m, double factor);
public void plusEquals (ConstantMatrix m);
public void plusEquals (ConstantMatrix m, double factor);
public void equalsPlus (double factor, ConstantMatrix m);
public void timesEquals (double factor);
public void elementwiseTimesEquals (ConstantMatrix m);
public void elementwiseTimesEquals (ConstantMatrix m, double factor);
public void divideEquals (double factor);
public void elementwiseDivideEquals (ConstantMatrix m);
public void elementwiseDivideEquals (ConstantMatrix m, double factor);
public double oneNormalize ();
public double twoNormalize ();
public double absNormalize();
public double infinityNormalize ();
}
| 1,653 | 34.956522 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/KLGain.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
The "gain" obtained by adding a feature to an exponential model.
From Della Pietra, Della Pietra & Lafferty, 1997
What is the *right* way to smooth p[] and q[] so we don't get zeros
(and therefore zeros in alpha[], and NaN in klgain[])?
I think it would be to put the prior over parameters into G_q(\alpha,g).
Right now I'm simply doing a little m-estimate smoothing of p[] and q[].
Note that we use Math.log(), not log-base-2, so the units are not "bits".
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.logging.*;
import cc.mallet.classify.Classification;
import cc.mallet.util.MalletLogger;
public class KLGain extends RankedFeatureVector
{
private static Logger logger = MalletLogger.getLogger(KLGain.class.getName());
// KLGain of a feature, f, is defined in terms of MaxEnt-type feature+class "Feature"s, F,
// F = f,c
// KLGain of a Feature, F, is
// G(F) = KL(p~[C]||q[C]) - KL(p~[C]||q_F[C])
// where p~[] is the empirical distribution, according to the true class label distribution
// and q[] is the distribution from the (imperfect) classifier
// and q_F[] is the distribution from the (imperfect) classifier with F added
// and F's weight adjusted (but none of the other weights adjusted)
// KLGain of a feature, f, is
// G(f) = sum_c G(f,c)
private static double[] calcKLGains (InstanceList ilist, LabelVector[] classifications)
{
int numInstances = ilist.size();
int numClasses = ilist.getTargetAlphabet().size();
int numFeatures = ilist.getDataAlphabet().size();
assert (ilist.size() > 0);
// Notation from Della Pietra & Lafferty 1997, p.4
// "p~"
double[][] p = new double[numClasses][numFeatures];
// "q"
double[][] q = new double[numClasses][numFeatures];
// "alpha", the weight of the new feature
double[][] alphas = new double[numClasses][numFeatures];
double flv; // feature location value
int fli; // feature location index
logger.info ("Starting klgains, #instances="+numInstances);
double trueLabelWeightSum = 0;
double modelLabelWeightSum = 0;
// Actually pretty lame smoothing based on ghost-counts
final boolean doingSmoothing = true;
double numInExpectation = doingSmoothing ? (numInstances+1.0) : (numInstances);
// Attempt some ad-hoc smoothing; remove the "+1.0" in the line above if not doing smoothing
if (doingSmoothing) {
for (int i = 0; i < numClasses; i++)
for (int j = 0; j < numFeatures; j++) {
p[i][j] = q[i][j] = 1.0/(numInExpectation*numFeatures*numClasses);
trueLabelWeightSum += p[i][j];
modelLabelWeightSum += q[i][j];
}
}
for (int i = 0; i < numInstances; i++) {
assert (classifications[i].getLabelAlphabet() == ilist.getTargetAlphabet());
Instance inst = ilist.get(i);
Labeling labeling = inst.getLabeling ();
FeatureVector fv = (FeatureVector) inst.getData ();
//double instanceWeight = ilist.getInstanceWeight(i);
// The code below relies on labelWeights summing to 1 over all labels!
for (int li = 0; li < numClasses; li++) {
double trueLabelWeight = labeling.value (li) / numInExpectation;
double modelLabelWeight = classifications[i].value(li) / numInExpectation;
trueLabelWeightSum += trueLabelWeight;
modelLabelWeightSum += modelLabelWeight;
//if (i < 500) System.out.println ("i="+i+" li="+li+" true="+trueLabelWeight+" model="+modelLabelWeight);
if (trueLabelWeight == 0 && modelLabelWeight == 0)
continue;
for (int fl = 0; fl < fv.numLocations(); fl++) {
fli = fv.indexAtLocation(fl);
assert (fv.valueAtLocation(fl) == 1.0);
//p[li][fli] += trueLabelWeight * instanceWeight / (numInstances+1);
//q[li][fli] += modelLabelWeight * instanceWeight / (numInstances+1);
p[li][fli] += trueLabelWeight;
q[li][fli] += modelLabelWeight;
}
}
}
assert (Math.abs (trueLabelWeightSum - 1.0) < 0.001)
: "trueLabelWeightSum should be 1.0, it was "+trueLabelWeightSum;
assert (Math.abs (modelLabelWeightSum - 1.0) < 0.001)
: "modelLabelWeightSum should be 1.0, it was "+modelLabelWeightSum;
/*
double psum = 0;
double qsum = 0;
for (int i = 0; i < numClasses; i++)
for (int j = 0; j < numFeatures; j++) {
psum += p[i][j];
qsum += q[i][j];
}
assert (Math.abs(psum - 1.0) < 0.0001) : "psum not 1.0! psum="+psum+" qsum="+qsum;
assert (Math.abs(qsum - 1.0) < 0.0001) : "qsum not 1.0! psum="+psum+" qsum="+qsum;
*/
for (int i = 0; i < numClasses; i++)
for (int j = 0; j < numFeatures; j++)
alphas[i][j] = Math.log ( (p[i][j]*(1.0-q[i][j])) / (q[i][j]*(1.0-p[i][j])) );
//q = null;
// "q[e^{\alpha g}]", p.4
//System.out.println ("Calculating qeag...");
double[][] qeag = new double[numClasses][numFeatures];
modelLabelWeightSum = 0;
for (int i = 0; i < ilist.size(); i++) {
assert (classifications[i].getLabelAlphabet() == ilist.getTargetAlphabet());
Instance inst = ilist.get(i);
Labeling labeling = inst.getLabeling ();
FeatureVector fv = (FeatureVector) inst.getData ();
int fvMaxLocation = fv.numLocations()-1;
for (int li = 0; li < numClasses; li++) {
// q(\omega) = (classifications[i].value(li) / numInstances)
double modelLabelWeight = classifications[i].value(li) / numInstances;
modelLabelWeightSum += modelLabelWeight;
// Following line now done before outside of loop over instances
// for (int fi = 0; fi < numFeatures; fi++) qeag[li][fi] += modelLabelWeight; // * 1.0;
for (int fl = 0; fl < fv.numLocations(); fl++) {
fli = fv.indexAtLocation(fl);
qeag[li][fli] += (modelLabelWeight * Math.exp (alphas[li][fli])) - modelLabelWeight;
}
}
}
for (int li = 0; li < numClasses; li++)
for (int fi = 0; fi < numFeatures; fi++)
// Assume that feature "fi" does not occur in "fv" and thus has value 0.
// exp(alpha * 0) == 1.0
// This factoring is possible because all features have value 1.0
qeag[li][fi] += modelLabelWeightSum; // * 1.0;
//System.out.println ("Calculating klgain values...");
double[] klgains = new double[numFeatures];
for (int i = 0; i < numClasses; i++)
for (int j = 0; j < numFeatures; j++)
if (alphas[i][j] > 0 && !Double.isInfinite(alphas[i][j]))
klgains[j] += (alphas[i][j] * p[i][j]) - Math.log (qeag[i][j]);
//klgains[j] += Math.abs(alphas[i][j] * p[i][j]);
//klgains[j] += Math.abs(alphas[i][j]);
if (true) {
logger.info ("klgains.length="+klgains.length);
for (int j = 0; j < numFeatures; j++) {
if (j % (numFeatures/100) == 0) {
for (int i = 0; i < numClasses; i++) {
logger.info ("c="+i+" p["+ilist.getDataAlphabet().lookupObject(j)+"] = "+p[i][j]);
logger.info ("c="+i+" q["+ilist.getDataAlphabet().lookupObject(j)+"] = "+q[i][j]);
logger.info ("c="+i+" alphas["+ilist.getDataAlphabet().lookupObject(j)+"] = "+alphas[i][j]);
logger.info ("c="+i+" qeag["+ilist.getDataAlphabet().lookupObject(j)+"] = "+qeag[i][j]);
}
logger.info ("klgains["+ilist.getDataAlphabet().lookupObject(j)+"] = "+klgains[j]);
}
}
}
return klgains;
}
public KLGain (InstanceList ilist, LabelVector[] classifications)
{
super (ilist.getDataAlphabet(), calcKLGains (ilist, classifications));
}
private static LabelVector[] getLabelVectorsFromClassifications (Classification[] c)
{
LabelVector[] ret = new LabelVector[c.length];
for (int i = 0; i < c.length; i++)
ret[i] = c[i].getLabelVector();
return ret;
}
public KLGain (InstanceList ilist, Classification[] classifications)
{
super (ilist.getDataAlphabet(),
calcKLGains (ilist, getLabelVectorsFromClassifications(classifications)));
}
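/* Usage sketch (illustrative only, not part of the original source; "ilist" is an
   existing InstanceList and "classifications" the Classification[] produced by
   classifying it with some trained MALLET classifier):

     KLGain klg = new KLGain (ilist, classifications);
     Object topFeature = klg.getObjectAtRank (0);  // feature with the largest gain
     double topGain = klg.getValueAtRank (0);
*/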
}
| 8,153 | 39.167488 | 109 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/PerLabelFeatureCounts.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
The number of instances of each class in which each feature occurs.
Note that we aren't attending to the feature's value, and MALLET doesn't currently
have any support at all for categorical features.
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
public class PerLabelFeatureCounts
{
Alphabet dataAlphabet, targetAlphabet;
FeatureCounts[] fc;
static boolean countInstances = true;
/* xxx This should use memory more sparsely!!! */
private static double[][] calcFeatureCounts (InstanceList ilist)
{
int numClasses = ilist.getTargetAlphabet().size();
int numFeatures = ilist.getDataAlphabet().size();
double[][] featureCounts = new double[numClasses][numFeatures];
// Count features across all classes
for (int i = 0; i < ilist.size(); i++) {
Instance inst = ilist.get(i);
if (!(inst.getData() instanceof FeatureVector))
throw new IllegalArgumentException ("Currently only handles FeatureVector data");
FeatureVector fv = (FeatureVector) inst.getData ();
// xxx Note that this ignores uncertain-labels.
int labelIndex = inst.getLabeling ().getBestIndex();
int fli;
for (int fl = 0; fl < fv.numLocations(); fl++) {
fli = fv.indexAtLocation(fl);
if (countInstances)
featureCounts[labelIndex][fli]++;
else
featureCounts[labelIndex][fli] += fv.valueAtLocation(fl);
}
}
return featureCounts;
}
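/* Illustrative note (not in the original source): with countInstances == true, a
   feature that appears in a document contributes exactly 1 to its label's count
   regardless of its value; e.g. two label-0 documents containing feature 7 give
   featureCounts[0][7] == 2. With countInstances == false the feature's values are
   summed instead. */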
public PerLabelFeatureCounts (InstanceList ilist)
{
dataAlphabet = ilist.getDataAlphabet();
targetAlphabet = ilist.getTargetAlphabet();
double[][] counts = calcFeatureCounts (ilist);
fc = new FeatureCounts[targetAlphabet.size()];
for (int i = 0; i < fc.length; i++)
fc[i] = new FeatureCounts (dataAlphabet, counts[i]);
}
public static class Factory implements RankedFeatureVector.PerLabelFactory
{
public Factory ()
{
}
public RankedFeatureVector[] newRankedFeatureVectors (InstanceList ilist)
{
PerLabelFeatureCounts x = new PerLabelFeatureCounts (ilist);
return x.fc;
}
}
}
| 2,493 | 30.56962 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/ChainedInstanceIterator.java | package cc.mallet.types;
import java.util.Iterator;
/** Under development, and not sure we actually want to have a class like this.
* It seems quite dangerous, and error-prone.
*/
@Deprecated
public abstract class ChainedInstanceIterator implements Iterator<Instance> {
Iterator<Instance> source;
ChainedInstanceIterator target;
/** Both source and target may be null. */
public ChainedInstanceIterator (Iterator<Instance> source, ChainedInstanceIterator target) {
this.source = source;
}
public abstract Instance next ();
public abstract boolean hasNext ();
public void remove () { throw new IllegalStateException ("This Iterator<Instance> does not support remove()."); }
/** The "source" of this iterator sends this message to tell this iterator
* that, even though source.hasNext() may have returned false before, it
* would now return true.
* Note that not all iterators handle this strange
* situation in which an iterator indicates that hasNext is false, but then
* later hasNext becomes true. In particular, if this iterator has also
* returned false for hasNext() to its consumer, but is now ready to provide
* more since its source now hasNext(), the consumer may not properly handle
* this situation. (One possible solution: create a ChainedIterator interface,
* and be more strict about type-checking all sources and targets of these
* kinds of iterators. -akm) (Also consider passing the source as an argument here.) */
public boolean sourceNowHasNext (Iterator<Instance> source) {
if (target != null) target.sourceNowHasNext(this);
return false;
}
}
| 1,623 | 37.666667 | 114 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/DenseMatrix.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.io.*;
import cc.mallet.util.Maths;
public abstract class DenseMatrix implements Matrix, Serializable
{
double[] values;
protected boolean hasInfinite; // if true, at least one value = -Inf or +Inf
public abstract int getNumDimensions ();
public abstract int getDimensions (int[] sizes);
public abstract double value (int[] indices);
public abstract void setValue (int[] indices, double value);
public abstract ConstantMatrix cloneMatrix ();
public abstract int singleIndex (int[] indices);
public abstract void singleToIndices (int i, int[] indices);
public double singleValue (int i) { return values[i]; }
public void setSingleValue (int i, double value) { values[i] = value; }
public void incrementSingleValue (int i, double delta) { values[i] += delta; }
public void setValueAtLocation (int loc, double value)
{
// indices == locations
setSingleValue (loc, value);
}
public int singleSize () { return values.length; }
public int numLocations () { return values.length; }
public int location (int index) { return index; }
public double valueAtLocation (int location) { return values[location]; }
// Returns a "singleIndex"
public int indexAtLocation (int location) { return location; }
public void setAll (double v) { for (int i = 0; i < values.length; i++) values[i] = v; }
public void set (ConstantMatrix m) {
if (m instanceof DenseMatrix) {
assert (m.singleSize() == values.length);
System.arraycopy (((DenseMatrix)m).values, 0, values, 0, values.length);
} else
for (int i = m.numLocations()-1; i >= 0; i--)
values[m.indexAtLocation(i)] = m.valueAtLocation(i);
}
public void setWithAddend (ConstantMatrix m, double addend) {
if (m instanceof DenseMatrix) {
assert (m.singleSize() == values.length);
for (int i = 0; i < values.length; i++)
values[i] = ((DenseMatrix)m).values[i] + addend;
} else
for (int i = m.numLocations()-1; i >= 0; i--)
values[m.indexAtLocation(i)] = m.valueAtLocation(i) + addend;
}
public void setWithFactor (ConstantMatrix m, double factor) {
if (m instanceof DenseMatrix) {
assert (m.singleSize() == values.length);
for (int i = 0; i < values.length; i++)
values[i] = ((DenseMatrix)m).values[i] * factor;
} else
for (int i = m.numLocations()-1; i >= 0; i--)
values[m.indexAtLocation(i)] = m.valueAtLocation(i) * factor;
}
public void plusEquals (double v) {
for (int i = 0; i < values.length; i++)
values[i] += v;
}
public void plusEquals (ConstantMatrix m) {
if (m instanceof DenseMatrix) {
assert (m.singleSize() == values.length);
for (int i = 0; i < values.length; i++) {
// added by Culotta - 12.10.0 to enforce INF - INF = 0
if(Double.isInfinite(values[i]) &&
Double.isInfinite(m.valueAtLocation(i))) {
double newValue = m.valueAtLocation(i);
// make sure they're opposite signed
if((newValue * values[i]) < 0) {
values[i] = 0.0; // inf - inf = 0
}
else
values[i] += newValue;
}
else
values[i] += m.valueAtLocation(i);
}
}
else
for (int i = m.numLocations()-1; i >= 0; i--) {
// added by Culotta - 12.10.02 to enforce INF - INF = 0
if(Double.isInfinite(values[m.indexAtLocation(i)]) &&
Double.isInfinite(((DenseMatrix)m).values[i])) {
double newValue = m.valueAtLocation(i);
// make sure they're opposite signed
if((newValue * values[m.indexAtLocation(i)]) < 0) {
values[m.indexAtLocation(i)] = 0.0;
}
else
values[m.indexAtLocation(i)] += newValue;
}
else
values[m.indexAtLocation(i)] += m.valueAtLocation(i);
}
}
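/* Illustrative note (added for clarity, not in the original source): the
   special-casing above enforces the convention INF + (-INF) == 0 instead of Java's
   default NaN; e.g. if values[i] == Double.POSITIVE_INFINITY and
   m.valueAtLocation(i) == Double.NEGATIVE_INFINITY, their product is negative, so
   values[i] is set to 0.0 rather than NaN. */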
public void plusEquals (ConstantMatrix m, double factor) {
if (m instanceof DenseMatrix) {
assert (m.singleSize() == values.length);
for (int i = 0; i < values.length; i++) {
// added by Culotta - 12.10.0 to enforce INF - INF = 0
if(Double.isInfinite(values[i]) &&
Double.isInfinite(m.valueAtLocation(i))) {
double newValue = factor*(m.valueAtLocation(i));
// make sure they're opposite signed
if((newValue * values[i]) < 0) {
values[i] = 0.0; // inf - inf = 0
}
else
values[i] += newValue;
}
else
values[i] += (m.valueAtLocation(i) * factor);
}
}
else
for (int i = m.numLocations()-1; i >= 0; i--){
// added by Culotta - 12.10.02 to enforce INF - INF = 0
if(Double.isInfinite(values[m.indexAtLocation(i)]) &&
Double.isInfinite(m.valueAtLocation(i))) {
double newValue = factor*m.valueAtLocation(i);
// make sure they're opposite signed
if((newValue * values[m.indexAtLocation(i)]) < 0) {
values[m.indexAtLocation(i)] = 0.0;
}
else
values[m.indexAtLocation(i)] += newValue;
}
else
values[m.indexAtLocation(i)] += m.valueAtLocation(i) * factor;
}
}
public void equalsPlus (double factor, ConstantMatrix m) {
if (m instanceof DenseMatrix) {
assert (m.singleSize() == values.length);
for (int i = 0; i < values.length; i++) {
// added by Culotta - 12.10.0 to enforce INF - INF = 0
if(Double.isInfinite(values[i]) &&
Double.isInfinite(((DenseMatrix)m).values[i])) {
double lhs = factor*values[i];
double rhs = ((DenseMatrix)m).values[i];
// make sure they're opposite signed
if((lhs * rhs) < 0) {
values[i] = 0.0; // inf - inf = 0
}
else
values[i] = lhs + rhs;
}
else
values[i] = factor*values[i] + ((DenseMatrix)m).values[i];
}
}
else
for (int i = m.numLocations()-1; i >= 0; i--) {
// added by Culotta - 12.10.02 to enforce INF - INF = 0
if(Double.isInfinite(values[m.indexAtLocation(i)]) &&
Double.isInfinite(((DenseMatrix)m).values[i])) {
double lhs = factor * values[m.indexAtLocation(i)];
double rhs = m.valueAtLocation(i);
// make sure they're opposite signed
if((lhs * rhs) < 0) {
values[m.indexAtLocation(i)] = 0.0;
}
else
values[m.indexAtLocation(i)] = lhs + rhs;
}
else
values[m.indexAtLocation(i)] = factor * values[m.indexAtLocation(i)] + m.valueAtLocation(i);
}
}
public void timesEquals (double factor) {
for (int i = 0; i < values.length; i++)
values[i] *= factor;
}
public void elementwiseTimesEquals (ConstantMatrix m) {
if (m instanceof DenseMatrix) {
assert (m.singleSize() == values.length);
for (int i = 0; i < values.length; i++)
values[i] *= ((DenseMatrix)m).values[i];
} else
for (int i = m.numLocations()-1; i >= 0; i--)
values[m.indexAtLocation(i)] *= m.valueAtLocation(i);
}
public void elementwiseTimesEquals (ConstantMatrix m, double factor) {
if (m instanceof DenseMatrix) {
assert (m.singleSize() == values.length);
for (int i = 0; i < values.length; i++)
values[i] *= ((DenseMatrix)m).values[i] * factor;
} else
for (int i = m.numLocations()-1; i >= 0; i--)
values[m.indexAtLocation(i)] *= m.valueAtLocation(i) * factor;
}
public void divideEquals (double factor) {
for (int i = 0; i < values.length; i++)
values[i] /= factor;
}
public void elementwiseDivideEquals (ConstantMatrix m) {
if (m instanceof DenseMatrix) {
assert (m.singleSize() == values.length);
for (int i = 0; i < values.length; i++)
values[i] /= ((DenseMatrix)m).values[i];
} else
for (int i = m.numLocations()-1; i >= 0; i--)
values[m.indexAtLocation(i)] /= m.valueAtLocation(i);
}
public void elementwiseDivideEquals (ConstantMatrix m, double factor) {
if (m instanceof DenseMatrix) {
assert (m.singleSize() == values.length);
for (int i = 0; i < values.length; i++)
values[i] /= ((DenseMatrix)m).values[i] * factor;
} else
for (int i = m.numLocations()-1; i >= 0; i--)
values[m.indexAtLocation(i)] /= m.valueAtLocation(i) * factor;
}
// xxx Perhaps make a special efficient case for binary vectors
public double dotProduct (ConstantMatrix m) {
double ret = 0;
if (m instanceof DenseMatrix) {
assert (m.singleSize() == values.length);
for (int i = 0; i < values.length; i++)
ret += values[i] * ((DenseMatrix)m).values[i];
} else {
for (int i = m.numLocations()-1; i >= 0; i--)
if (m.indexAtLocation(i) < values.length) // guard against indices beyond this matrix's length
ret += values[m.indexAtLocation(i)] * m.valueAtLocation(i);
else{
// System.out.println(m.indexAtLocation(i) + ":" + values.length);
// throw new ArrayIndexOutOfBoundsException(m.indexAtLocation(i));
}
}
return ret;
}
public double absNorm() {
double ret = 0;
for (int i = 0; i < values.length; i++)
ret += Math.abs(values[i]);
return ret;
}
public double oneNorm () {
double ret = 0;
for (int i = 0; i < values.length; i++)
ret += values[i];
return ret;
}
public double twoNorm () {
double ret = 0;
for (int i = 0; i < values.length; i++)
ret += values[i] * values[i];
return Math.sqrt (ret);
}
public double infinityNorm () {
double max = Double.NEGATIVE_INFINITY;
for (int i = 0; i < values.length; i++)
if (Math.abs(values[i]) > max)
max = Math.abs(values[i]);
return max;
}
public double oneNormalize () {
double norm = oneNorm();
for (int i = 0; i < values.length; i++)
values[i] /= norm;
return norm;
}
public double twoNormalize () {
double norm = twoNorm();
for (int i = 0; i < values.length; i++)
values[i] /= norm;
return norm;
}
public double absNormalize () {
double norm = absNorm();
if (norm > 0)
for (int i = 0; i < values.length; i++)
values[i] /= norm;
return norm;
}
public double infinityNormalize () {
double norm = infinityNorm();
for (int i = 0; i < values.length; i++)
values[i] /= norm;
return norm;
}
public void print() {
for (int i = 0; i < values.length; i++)
System.out.println ("DenseMatrix["+i+"] = "+values[i]);
}
public boolean isNaN() {
for (int i = 0; i < values.length; i++)
if (Double.isNaN(values[i]))
return true;
return false;
}
public final void substitute (double oldValue, double newValue)
{
for (int i = values.length-1; i >= 0; i--)
if (values[i] == oldValue)
values[i] = newValue;
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
private void writeObject (ObjectOutputStream out) throws IOException {
int i, size;
out.writeInt (CURRENT_SERIAL_VERSION);
if (values != null) {
size = values.length;
out.writeInt(size);
for (i=0; i<size; i++) {
out.writeDouble(values[i]);
}
}
else {
out.writeInt(NULL_INTEGER);
}
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int i, size;
this.hasInfinite = false;
int version = in.readInt ();
size = in.readInt();
if (size != NULL_INTEGER) {
values = new double[size];
for (i = 0; i<size; i++) {
values[i] = in.readDouble();
if (Double.isInfinite (values[i]))
this.hasInfinite = true;
}
}
else {
values = null;
}
}
public static void plusEquals (double[] accumulator, double[] addend)
{
assert (accumulator.length == addend.length);
for (int i = 0; i < addend.length; i++)
accumulator[i] += addend[i];
}
public static void plusEquals (double[] accumulator, double[] addend, double factor)
{
assert (accumulator.length == addend.length);
for (int i = 0; i < addend.length; i++)
accumulator[i] += factor * addend[i];
}
public static void timesEquals (double[] accumulator, double[] product)
{
assert (accumulator.length == product.length);
for (int i = 0; i < product.length; i++)
accumulator[i] *= product[i];
}
public static double infinityNorm (double[] vector)
{
double max = Double.NEGATIVE_INFINITY;
for (int i = 0; i < vector.length; i++)
if (Math.abs(vector[i]) > max)
max = Math.abs(vector[i]);
return max;
}
// This should probably be generalized.
public boolean almostEquals (ConstantMatrix m2) {
if (getNumDimensions () != m2.getNumDimensions ()) {
return false;
}
if (numLocations () != m2.numLocations ()) {
return false;
}
int[] dims1 = new int [getNumDimensions ()];
int[] dims2 = new int [getNumDimensions ()];
getDimensions (dims1);
m2.getDimensions (dims2);
for (int i = 0; i < dims1.length; i++) {
if (dims1 [i] != dims2 [i]) {
return false;
}
}
for (int i = 0; i < numLocations(); i++) {
if (!Maths.almostEquals (valueAtLocation (i), m2.valueAtLocation (i))) {
return false;
}
}
return true;
}
}
| 13,143 | 28.339286 | 97 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/MatrixOps.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types;
import java.util.Random;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
/**
* A class of static utility functions for manipulating arrays of
* double.
*/
public final class MatrixOps
{
/** Sets every element of a double array to a given value.
* @param m The array to modify
* @param v The value
*/
public static void setAll (double[] m, double v) {
java.util.Arrays.fill (m, v);
}
public static void set (double[] dest, double[] source) {
// Java is pass-by-value, so reallocating "dest" here would never be visible to the
// caller (as the original XXX comment by -cas pointed out); require equal lengths instead.
if (source.length != dest.length)
throw new IllegalArgumentException ("dest.length != source.length");
System.arraycopy (source, 0, dest, 0, source.length);
}
/**
* Multiplies every element in an array by a scalar.
* @param m The array
* @param factor The scalar
*/
public static void timesEquals (double[] m, double factor) {
for (int i=0; i < m.length; i++)
m[i] *= factor;
}
/* Calculates the Schur/Hadamard product */ // JJW
public static void timesEquals(double[] m1, double[] m2) {
assert (m1.length == m2.length) : "unequal lengths\n";
for (int i=0; i < m1.length; i++) {
m1[i] *= m2[i];
}
}
/**
* Adds a scalar to every element in an array.
* @param m The array
* @param toadd The scalar
*/
public static void plusEquals (double[] m, double toadd) {
for (int i=0; i < m.length; i++)
m[i] += toadd;
}
public static void plusEquals (double[] m1, double[] m2) {
assert (m1.length == m2.length) : "unequal lengths\n";
for (int i=0; i < m1.length; i++) {
if (Double.isInfinite(m1[i]) && Double.isInfinite(m2[i]) && (m1[i]*m2[i] < 0))
m1[i] = 0.0;
else
m1[i] += m2[i];
}
}
public static void plusEquals (double[] m1, double[] m2, double factor) {
assert (m1.length == m2.length) : "unequal lengths\n";
for (int i=0; i < m1.length; i++) {
double m1i = m1[i];
double m2i = m2[i];
if (Double.isInfinite(m1i) && Double.isInfinite(m2i) && (m1[i]*m2[i] < 0))
m1[i] = 0.0;
else m1[i] += m2[i] * factor;
}
}
public static void plusEquals (double[][] m1, double[][] m2, double factor)
{
assert (m1.length == m2.length) : "unequal lengths\n";
for (int i=0; i < m1.length; i++) {
for (int j=0; j < m1[i].length; j++) {
m1[i][j] += m2[i][j] * factor;
}
}
}
public static void log (double[] m)
{
for (int i = 0; i < m.length; i++)
m[i] = Math.log(m[i]);
}
/** @deprecated Use dotProduct() */
public static double dot (double[] m1, double[] m2) {
assert (m1.length == m2.length) : "m1.length != m2.length\n";
double ret = 0.0;
for (int i=0; i < m1.length; i++)
ret += m1[i] * m2[i];
return ret;
}
public static double dotProduct (double[] m1, double[] m2) {
assert (m1.length == m2.length) : "m1.length != m2.length\n";
double ret = 0.0;
for (int i=0; i < m1.length; i++)
ret += m1[i] * m2[i];
return ret;
}
public static double absNorm (double[] m) {
double ret = 0;
for (int i = 0; i < m.length; i++)
ret += Math.abs(m[i]);
return ret;
}
public static double twoNorm (double[] m) {
double ret = 0;
for (int i = 0; i < m.length; i++)
ret += m[i] * m[i];
return Math.sqrt (ret);
}
public static double twoNormSquared (double[] m) {
double ret = 0;
for (int i = 0; i < m.length; i++)
ret += m[i] * m[i];
return ret;
}
public static double oneNorm (double[] m) {
double ret = 0;
for (int i = 0; i < m.length; i++)
ret += m[i];
return ret;
}
public static double oneNormalize (double[] m) {
double sum = oneNorm(m);
for (int i = 0; i < m.length; i++)
m[i] /= sum;
return sum;
}
public static double normalize (double[] m) {
return oneNormalize(m);
}
public static double infinityNorm (double[] m) {
double ret = Double.NEGATIVE_INFINITY;
for (int i = 0; i < m.length; i++)
if (Math.abs(m[i]) > ret)
ret = Math.abs(m[i]);
return ret;
}
public static double absNormalize (double[] m) {
double norm = absNorm(m);
if (norm > 0)
for (int i = 0; i < m.length; i++)
m[i] /= norm;
return norm;
}
public static double twoNormalize (double[] m) {
double norm = twoNorm(m);
if (norm > 0)
for (int i = 0; i < m.length; i++)
m[i] /= norm;
return norm;
}
public static void substitute (double[] m, double oldValue, double newValue) {
for (int i = m.length-1; i >= 0; i--)
if (m[i] == oldValue)
m[i] = newValue;
}
/** If "ifSelected" is false, it reverses the selection. If
"fselection" is null, this implies that all features are
selected; all values in the row will be changed unless
"ifSelected" is false. */
public static final void rowSetAll (double[] m, int nc, int ri, double v, FeatureSelection fselection, boolean ifSelected) {
if (fselection == null) {
if (ifSelected == true) {
for (int ci = 0; ci < nc; ci++)
m[ri*nc+ci] = v;
}
} else {
// xxx Temporary check for full selection
//assert (fselection.nextDeselectedIndex (0) == nc);
for (int ci = 0; ci < nc; ci++)
if (fselection.contains(ci) ^ !ifSelected)
m[ri*nc+ci] = v;
}
}
public static double rowDotProduct (double[] m, int nc, int ri,
Vector v, int maxCi,
FeatureSelection selection) {
return rowDotProduct (m, nc, ri, v, 1, maxCi, selection);
}
public static double rowDotProduct (double[] m, int nc, int ri,
Vector v, double factor, int maxCi,
FeatureSelection selection) {
double ret = 0;
if (selection != null) {
int size = v.numLocations();
for (int cil = 0; cil < size; cil++) {
int ci = v.indexAtLocation (cil);
if (selection.contains(ci) && ci < nc && ci <= maxCi)
ret += m[ri*nc+ci] * v.valueAtLocation(cil) * factor;
}
} else {
int size = v.numLocations();
for (int cil = 0; cil < size; cil++) {
int ci = v.indexAtLocation (cil);
if (ci <= maxCi)
ret += m[ri*nc+ci] * v.valueAtLocation(cil) * factor;
}
}
return ret;
}
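/* Worked example (illustrative only, not in the original source): "m" is a row-major
   flattening of an (nr x nc) matrix, so row ri occupies m[ri*nc] .. m[ri*nc + nc - 1].
   With nc == 3 and ri == 1, the entries of row 1 are m[3], m[4], m[5], and
   rowDotProduct multiplies each v.valueAtLocation(cil) by the matching m[ri*nc + ci]. */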
public static final void rowPlusEquals (double[] m, int nc, int ri,
Vector v, double factor) {
for (int vli = 0; vli < v.numLocations(); vli++)
m[ri*nc+v.indexAtLocation(vli)] += v.valueAtLocation(vli) * factor;
}
public static boolean isNaN(double[] m) {
for (int i = 0; i < m.length; i++)
if (Double.isNaN(m[i]))
return true;
return false;
}
// gsc: similar to isNaN, but checks for infinite values
public static boolean isInfinite(double[] m) {
for (int i = 0; i < m.length; i++)
if (Double.isInfinite(m[i]))
return true;
return false;
}
// gsc: returns true if any value in the array is either NaN or infinite
public static boolean isNaNOrInfinite(double[] m) {
for (int i = 0; i < m.length; i++)
if (Double.isInfinite(m[i]) || Double.isNaN(m[i]))
return true;
return false;
}
// gsc: returns true if any value in the array is greater than 0.0/-0.0
public static boolean isNonZero(double[] m) {
for (int i = 0; i < m.length; i++)
if (Math.abs(m[i]) > 0.0)
return true;
return false;
}
// gsc: returns true if any value in the array is 0.0/-0.0
public static boolean isZero(double[] m) {
for (int i = 0; i < m.length; i++)
if (Math.abs(m[i]) == 0.0)
return true;
return false;
}
// TODO: This is the same as oneNorm(), and should be removed
public static double sum (double[] m) {
double sum = 0;
for (int i = 0; i < m.length; i++)
sum += m[i];
return sum;
}
public static double sum (double[][] m) {
double sum = 0;
for (int i = 0; i < m.length; i++)
for (int j = 0; j < m[i].length; j++)
sum += m[i][j];
return sum;
}
public static int sum (int[] m) {
int sum = 0;
for (int i = 0; i < m.length; i++)
sum += m[i];
return sum;
}
// CPAL
// TODO: This should be removed, because FeatureVector already has oneNorm(). -AKM
public static double sum(Vector v) {
double sum = 0;
for (int vli = 0; vli < v.numLocations(); vli++) {
sum = sum + v.valueAtLocation(vli);
}
return sum;
}
public static double mean (double[] m) {
double sum = 0;
for (int i = 0; i < m.length; i++)
sum += m[i];
return sum / m.length;
}
/** Return the standard deviation */
public static double stddev (double[] m) {
double mean = mean (m);
double s = 0;
for (int i = 0; i < m.length; i++)
s += (m[i] - mean) * (m[i] - mean);
return Math.sqrt (s/m.length);
// Some prefer dividing by (m.length-1), but this is also common
}
public static double stderr (double[] m ) {
return stddev(m) / Math.sqrt (m.length);
}
// gsc
/** Return the variance */
public static double variance (double[] m) {
double mean = mean (m);
double s = 0;
for (int i = 0; i < m.length; i++)
s += (m[i] - mean) * (m[i] - mean);
return s/m.length;
// Some prefer dividing by (m.length-1), but this is also common
}
/**
* Prints a double array to standard output
* @param m Array to print.
*/
public static final void print (double[] m)
{
print (new PrintWriter (new OutputStreamWriter (System.out), true), m);
}
/**
* Prints a double array to the given PrintWriter.
   * @param out Writer to print output to
* @param m Array to print.
*/
public static final void print (PrintWriter out, double[] m)
{
for (int i = 0; i < m.length; i++) {
out.print (" " + m[i]);
}
out.println("");
}
public static final void print (double[][] arr)
{
for (int i = 0; i < arr.length; i++) {
double[] doubles = arr[i];
print (doubles);
}
}
	/** Return a String containing the elements of an array, separated by spaces.
*
* @param m An array of any type
*/
public static final String toString( Object m ) {
StringBuffer sb = new StringBuffer();
int n=java.lang.reflect.Array.getLength(m)-1;
for (int i = 0; i<n ; i++) {
sb.append(java.lang.reflect.Array.get(m,i));
sb.append(" ");
}
if (n>=0)
sb.append(java.lang.reflect.Array.get(m,n));
return sb.toString();
}
	public static final void printInRows (double[] arr)
	{
		for (int i = 0; i < arr.length; i++) {
			System.out.println("["+i+"] "+arr[i]);
		}
	}
public static void setAll (double[][][] m, double v)
{
for (int i = 0; i < m.length; i++) {
for (int j = 0; j < m[i].length; j++) {
for (int k = 0; k < m[i][j].length; k++) {
m[i][j][k] = v;
}
}
}
}
public static void setAll (double[][] m, double v)
{
for (int i = 0; i < m.length; i++) {
for (int j = 0; j < m[i].length; j++) {
m[i][j] = v;
}
}
}
public static void print (int[][] arr)
{
for (int i = 0; i < arr.length; i++) {
print (arr [i]);
}
}
public static void print (int[] m)
{
for (int i = 0; i < m.length; i++) {
System.out.print (" " + m[i]);
}
System.out.println("");
}
public static double[] randomVector (int n, Random r)
{
double[] ret = new double [n];
for (int i = 0; i < n; i++) ret[i] = r.nextDouble ();
return ret;
}
public static void timesEquals (double[][] m, double factor)
{
for (int i = 0; i < m.length; i++) {
for (int j = 0; j < m[i].length; j++) {
m[i][j] *= factor;
}
}
}
/**
* Returns the maximum elementwise absolute difference between two vectors.
* This is the same as infinityNorm (v1 - v2) (if that were legal Java).
* @param v1 Input vector, as double[]
* @param v2 Input vector, as double[]
   * @return The maximum absolute difference between corresponding elements.
*/
public static double maxAbsdiff (double[] v1, double[] v2)
{
double max = Double.NEGATIVE_INFINITY;
for (int i = 0; i < v1.length; i++) {
double val = Math.abs (v1[i] - v2[i]);
if (val > max) max = val;
}
return max;
}
public static int max (int[][] m) {
int maxval = m[0][0];
for (int i=0; i < m.length; i++) {
for (int j=0; j < m[i].length; j++) {
if (m[i][j] > maxval) {
maxval = m[i][j];
}
}
}
return maxval;
}
public static int max (int [] elems)
{
int max = Integer.MIN_VALUE;
for (int i = 0; i < elems.length; i++) {
int elem = elems[i];
if (elem > max) {
max = elem;
}
}
return max;
}
public static double max (double [] elems)
{
double max = Double.NEGATIVE_INFINITY;
for (int i = 0; i < elems.length; i++) {
double elem = elems[i];
if (elem > max) {
max = elem;
}
}
return max;
}
public static double min (double [] elems)
{
double min = Double.POSITIVE_INFINITY;
for (int i = 0; i < elems.length; i++) {
double elem = elems[i];
if (elem < min) {
min = elem;
}
}
return min;
}
public static int maxIndex (double [] elems)
{
double max = Double.NEGATIVE_INFINITY;
int maxIndex = -1;
for (int i = 0; i < elems.length; i++) {
double elem = elems[i];
if (elem > max) {
max = elem;
maxIndex = i;
}
}
return maxIndex;
}
public static int minIndex (double [] elems)
{
double min = Double.POSITIVE_INFINITY;
int minIndex = -1;
for (int i = 0; i < elems.length; i++) {
double elem = elems[i];
if (elem < min) {
min = elem;
minIndex = i;
}
}
return minIndex;
}
public static double[] append (double[] original, double newValue)
{
double[] ret = new double[original.length + 1];
System.arraycopy(original, 0, ret, 0, original.length);
ret[original.length] = newValue;
return ret;
}
public static double max(double[][] ds) {
double max = Double.NEGATIVE_INFINITY;
for (int i = 0; i < ds.length; i++) {
for (int j = 0; j < ds[i].length; j++) {
if (ds[i][j] > max) {
max = ds[i][j];
}
}
}
return max;
}
public static int[] maxIndex(double[][] ds) {
int[] maxIndices = new int[] {-1,-1};
double max = Double.NEGATIVE_INFINITY;
for (int i = 0; i < ds.length; i++) {
for (int j = 0; j < ds[i].length; j++) {
if (ds[i][j] > max) {
max = ds[i][j];
maxIndices[0] = i;
maxIndices[1] = j;
}
}
}
return maxIndices;
}
}
| 16,208 | 26.566327 | 128 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/SingleInstanceIterator.java | package cc.mallet.types;
import java.util.Iterator;
public class SingleInstanceIterator implements Iterator<Instance> {
Instance nextInstance;
boolean doesHaveNext;
public SingleInstanceIterator (Instance inst) {
nextInstance = inst;
doesHaveNext = true;
}
public boolean hasNext() {
return doesHaveNext;
}
public Instance next() {
doesHaveNext = false;
return nextInstance;
}
	public void remove () { throw new UnsupportedOperationException ("This iterator does not support remove()."); }
}
| 514 | 17.392857 | 104 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/IDSorter.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types;
/**
* This class is contains a comparator for use in sorting
* integers that have associated floating point values. One
* example would be sorting words by probability in a Naive Bayes
* model. Ties are broken by the ID.
* <code><pre>
* IDSorter[] sortedIDs = new IDSorter[n];
* for (int i=0; i<n; i++) {
* sortedIDs[i] = new IDSorter(i, weights[i]);
* }
* Arrays.sort(sortedIDs);
*
 *  for (int i=0; i<10; i++) {
 *      System.out.println(sortedIDs[i].getID() + "\t" + sortedIDs[i].getWeight());
 *  }
 * </pre></code>
*
* @author David Mimno
*/
public class IDSorter implements Comparable {
int id; double p;
public IDSorter (int id, double p) { this.id = id; this.p = p; }
public IDSorter (int id, int p) { this.id = id; this.p = p; }
public final int compareTo (Object o2) {
		double otherP = ((IDSorter) o2).p;
		if (p > otherP) {
			return -1;
		}
		else if (p < otherP) {
			return 1;
		}
// p == otherP, sort by ID
int otherID = ((IDSorter) o2).id;
if (id > otherID) { return -1; }
else if (id < otherID) { return 1; }
return 0;
}
public int getID() {return id;}
public double getWeight() {return p;}
/** Reinitialize an IDSorter */
public void set(int id, double p) { this.id = id; this.p = p; }
}
| 1,699 | 27.813559 | 76 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Instance.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types;
import java.util.logging.*;
import java.io.*;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.Labeling;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.PropertyList;
/**
A machine learning "example" to be used in training, testing or
performance of various machine learning algorithms.
<p>An instance contains four generic fields of predefined name:
"data", "target", "name", and "source". "Data" holds the data represented
	 by the instance, "target" is often a label associated with the instance,
	 "name" is a short identifying name for the instance (such as a filename),
	 and "source" is human-readable source information (such as the original text).
<p> Each field has no predefined type, and may change type as the instance
is processed. For example, the data field may start off being a string that
represents a file name and then be processed by a {@link cc.mallet.pipe.Pipe} into a CharSequence
representing the contents of the file, and eventually to a feature vector
holding indices into an {@link cc.mallet.types.Alphabet} holding words found in the file.
It is up to each pipe which fields in the Instance it modifies; the most common
case is that the pipe modifies the data field.
<p>Generally speaking, there are two modes of operation for
Instances. (1) An instance gets created and passed through a
Pipe, and the resulting data/target/name/source fields are used.
This is generally done for training instances. (2) An instance
gets created with raw values in its slots, then different users
of the instance call newPipedCopy() with their respective
different pipes. This might be done for test instances at
"performance" time.
<p> Rather than store an {@link cc.mallet.types.Alphabet} in the Instance,
we obtain it through the Pipe instance variable, because the Pipe also
indicates where the data came from and how to interpret the Alphabet.
<p>Instances can be made immutable if locked.
Although unlocked Instances are mutable, typically the only code that
changes the values in the four slots is inside Pipes.
<p> Note that constructing an instance with a pipe argument means
"Construct the instance and then run it through the pipe".
{@link cc.mallet.types.InstanceList} uses this method
when adding instances through a pipeInputIterator.
@see Pipe
@see Alphabet
@see InstanceList
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
public class Instance implements Serializable, AlphabetCarrying, Cloneable
{
private static Logger logger = MalletLogger.getLogger(Instance.class.getName());
protected Object data; // The input data in digested form, e.g. a FeatureVector
protected Object target; // The output data in digested form, e.g. a Label
protected Object name; // A readable name of the source, e.g. for ML error analysis
	protected Object source;   /* The input in a reproducible form, e.g. enabling re-print of
string w/ POS tags, usually without target information,
e.g. an un-annotated RegionList. */
PropertyList properties = null;
boolean locked = false;
/** In certain unusual circumstances, you might want to create an Instance
* without sending it through a pipe.
*/
public Instance (Object data, Object target, Object name, Object source)
{
this.data = data;
this.target = target;
this.name = name;
this.source = source;
}
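	/** Minimal usage sketch (added for illustration; not part of the original API):
	 *  exercises the four generic fields and the locking behavior described in the
	 *  class comment above.  The field values here are arbitrary placeholders. */
	private static Instance usageSketch ()
	{
		Instance inst = new Instance ("raw document text", null, "doc-001", "raw document text");
		inst.setTarget ("positiveClass");             // in practice usually a Label
		inst.setNumericProperty ("docLength", 17.0);
		inst.lock ();                                 // subsequent set...() calls now throw IllegalStateException
		return inst;
	}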
public Object getData () { return data; }
public Object getTarget () { return target; }
public Object getName () { return name; }
public Object getSource () { return source; }
public Alphabet getDataAlphabet() {
if (data instanceof AlphabetCarrying)
return ((AlphabetCarrying)data).getAlphabet();
else
return null;
}
public Alphabet getTargetAlphabet() {
if (target instanceof AlphabetCarrying)
return ((AlphabetCarrying)target).getAlphabet();
else
return null;
}
public Alphabet getAlphabet () {
return getDataAlphabet();
}
public Alphabet[] getAlphabets()
{
return new Alphabet[] {getDataAlphabet(), getTargetAlphabet()};
}
public boolean alphabetsMatch (AlphabetCarrying object)
{
Alphabet[] oas = object.getAlphabets();
		return oas.length == 2 && oas[0].equals(getDataAlphabet()) && oas[1].equals(getTargetAlphabet());
}
public boolean isLocked () { return locked; }
public void lock() { locked = true; }
public void unLock() { locked = false; }
public Labeling getLabeling ()
{
if (target == null || target instanceof Labeling)
return (Labeling)target;
throw new IllegalStateException ("Target is not a Labeling; it is a "+target.getClass().getName());
}
public void setData (Object d) {
if (!locked) data = d;
else throw new IllegalStateException ("Instance is locked.");
}
public void setTarget (Object t) {
if (!locked) target = t;
else throw new IllegalStateException ("Instance is locked.");
}
public void setLabeling (Labeling l) {
// This test isn't strictly necessary, but might catch some typos.
assert (target == null || target instanceof Labeling);
if (!locked) target = l;
else throw new IllegalStateException ("Instance is locked.");
}
public void setName (Object n) {
if (!locked) name = n;
else throw new IllegalStateException ("Instance is locked.");
}
public void setSource (Object s) {
if (!locked) source = s;
else throw new IllegalStateException ("Instance is locked.");
}
public void clearSource () {
source = null;
}
public Instance shallowCopy ()
{
Instance ret = new Instance (data, target, name, source);
ret.locked = locked;
ret.properties = properties;
return ret;
}
public Object clone ()
{
return shallowCopy();
}
// Setting and getting properties
public void setProperty (String key, Object value)
{
properties = PropertyList.add (key, value, properties);
}
public void setNumericProperty (String key, double value)
{
properties = PropertyList.add (key, value, properties);
}
@Deprecated
public PropertyList getProperties ()
{
return properties;
}
@Deprecated
public void setPropertyList (PropertyList p)
{
if (!locked) properties = p;
else throw new IllegalStateException ("Instance is locked.");
}
public Object getProperty (String key)
{
return properties == null ? null : properties.lookupObject (key);
}
public double getNumericProperty (String key)
{
return (properties == null ? 0.0 : properties.lookupNumber (key));
}
public boolean hasProperty (String key)
{
return (properties == null ? false : properties.hasProperty (key));
}
// Serialization of Instance
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject(data);
out.writeObject(target);
out.writeObject(name);
out.writeObject(source);
out.writeObject(properties);
out.writeBoolean(locked);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
data = in.readObject();
target = in.readObject();
name = in.readObject();
source = in.readObject();
properties = (PropertyList) in.readObject();
locked = in.readBoolean();
}
}
| 7,856 | 30.809717 | 102 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Labels.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
A collection of labels, either for a multi-label problem (all
labels are part of the same label dictionary), or a factorized
	 labeling (each label is part of a different dictionary).
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.io.Serializable;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import cc.mallet.types.Label;
/** A collection of Label objects, e.g. one Label per factor in a factorized labeling. */
public class Labels implements AlphabetCarrying, Serializable
{
Label[] labels;
public Labels (Label[] labels)
{
for (int i = 0; i < labels.length-1; i++)
if (!Alphabet.alphabetsMatch(labels[i], labels[i+1]))
throw new IllegalArgumentException ("Alphabets do not match");
this.labels = new Label[labels.length];
System.arraycopy (labels, 0, this.labels, 0, labels.length);
}
// Number of factors
public int size () { return labels.length; }
public Label get (int i) { return labels[i]; }
public void set (int i, Label l) { labels[i] = l; }
public String toString ()
{
String ret = "";
for (int i = 0; i < labels.length; i++) {
ret += labels[i].toString();
if (i < labels.length - 1) ret += " ";
}
return ret;
}
public Alphabet getAlphabet () { return labels[0].getAlphabet(); }
public Alphabet[] getAlphabets () { return labels[0].getAlphabets(); }
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
out.defaultWriteObject ();
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
in.defaultReadObject ();
}
}
| 2,310 | 27.8875 | 93 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/InfoGain.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
Information gain of the absence/precense of each feature.
Note that we aren't attending to the feature's value, and MALLET doesn't currently
have any support at all for categorical features.
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
public class InfoGain extends RankedFeatureVector
{
// xxx This is DISGUSTINGLY non-thread-safe.
static double staticBaseEntropy;
static LabelVector staticBaseLabelDistribution;
// xxx Yuck. Figure out how to remove this.
// Not strictly part of a list of feature info gains, but convenient and efficient
// for ml.classify.DecisionTree
double baseEntropy;
LabelVector baseLabelDistribution;
private static double[] calcInfoGains (InstanceList ilist)
{
final double log2 = Math.log(2);
int numInstances = ilist.size();
int numClasses = ilist.getTargetAlphabet().size();
int numFeatures = ilist.getDataAlphabet().size();
double[] infogains = new double[numFeatures];
double[][] targetFeatureCount = new double[numClasses][numFeatures];
double[] featureCountSum = new double[numFeatures];
double[] targetCount = new double[numClasses];
double targetCountSum = 0;
double flv; // feature location value
int fli; // feature location index
double count;
// Populate targetFeatureCount, et al
for (int i = 0; i < ilist.size(); i++) {
Instance inst = ilist.get(i);
Labeling labeling = inst.getLabeling ();
FeatureVector fv = (FeatureVector) inst.getData ();
double instanceWeight = ilist.getInstanceWeight(i);
// The code below relies on labelWeights summing to 1 over all labels!
double labelWeightSum = 0;
for (int ll = 0; ll < labeling.numLocations(); ll++) {
int li = labeling.indexAtLocation (ll);
double labelWeight = labeling.valueAtLocation (ll);
labelWeightSum += labelWeight;
if (labelWeight == 0) continue;
count = labelWeight * instanceWeight;
for (int fl = 0; fl < fv.numLocations(); fl++) {
fli = fv.indexAtLocation(fl);
// xxx Is this right? What should we do about negative values?
// Whatever is decided here should also go in DecisionTree.split()
if (fv.valueAtLocation(fl) > 0) {
targetFeatureCount[li][fli] += count;
featureCountSum[fli] += count;
}
}
targetCount[li] += count;
targetCountSum += count;
}
assert (Math.abs (labelWeightSum - 1.0) < 0.0001);
}
if (targetCountSum == 0) {
      staticBaseEntropy = 0.0;  // xxx Should this instead be infinite?
staticBaseLabelDistribution = new LabelVector ((LabelAlphabet)ilist.getTargetAlphabet(), targetCount);
return infogains;
}
assert (targetCountSum > 0) : targetCountSum;
double p;
double[] classDistribution = new double[numClasses];
// Calculate the overall entropy of the labels, ignoring the features
staticBaseEntropy = 0;
//System.out.print ("targetCount "); Vector.print (targetCount);
//System.out.println ("targetCountSum = "+targetCountSum);
for (int li = 0; li < numClasses; li++) {
p = targetCount[li]/targetCountSum;
classDistribution[li] = p;
assert (p <= 1.0) : p;
if (p != 0)
staticBaseEntropy -= p * Math.log(p) / log2;
}
staticBaseLabelDistribution = new LabelVector ((LabelAlphabet)ilist.getTargetAlphabet(), classDistribution);
//System.out.println ("Total class entropy = "+staticBaseEntropy);
// Calculate the InfoGain of each feature
for (int fi = 0; fi < numFeatures; fi++) {
double featurePresentEntropy = 0;
double norm = featureCountSum[fi];
if (norm > 0) {
for (int li = 0; li < numClasses; li++) {
p = targetFeatureCount[li][fi]/norm;
assert (p <= 1.00000001) : p;
if (p != 0)
featurePresentEntropy -= p * Math.log(p) / log2;
}
}
assert (!Double.isNaN(featurePresentEntropy)) : fi;
norm = targetCountSum-featureCountSum[fi];
double featureAbsentEntropy = 0;
if (norm > 0) {
for (int li = 0; li < numClasses; li++) {
p = (targetCount[li]-targetFeatureCount[li][fi])/norm;
assert (p <= 1.00000001) : p;
if (p != 0)
featureAbsentEntropy -= p * Math.log(p) / log2;
}
}
assert (!Double.isNaN(featureAbsentEntropy)) : fi;
//Alphabet dictionary = ilist.getDataAlphabet();
//System.out.println ("Feature="+dictionary.lookupSymbol(fi)+" presentWeight="
//+(featureCountSum[fi]/targetCountSum)+" absentWeight="
//+((targetCountSum-featureCountSum[fi])/targetCountSum)+" presentEntropy="
//+featurePresentEntropy+" absentEntropy="
//+featureAbsentEntropy);
infogains[fi] = (staticBaseEntropy
- (featureCountSum[fi]/targetCountSum) * featurePresentEntropy
- ((targetCountSum-featureCountSum[fi])/targetCountSum) * featureAbsentEntropy);
assert (!Double.isNaN(infogains[fi])) : fi;
}
return infogains;
}
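  // For reference (a restatement of the loop above, using log base 2):
  //   IG(f) = H(C) - P(f present) * H(C | f present) - P(f absent) * H(C | f absent),
  // where H(C) is staticBaseEntropy and the P(.) terms are featureCountSum ratios.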
public InfoGain (InstanceList ilist)
{
super (ilist.getDataAlphabet(), calcInfoGains (ilist));
baseEntropy = staticBaseEntropy;
baseLabelDistribution = staticBaseLabelDistribution;
}
public InfoGain (Alphabet vocab, double[] infogains)
{
super (vocab, infogains);
}
public double getBaseEntropy ()
{
return baseEntropy;
}
public LabelVector getBaseLabelDistribution ()
{
return baseLabelDistribution;
}
public static class Factory implements RankedFeatureVector.Factory
{
public Factory ()
{
}
public RankedFeatureVector newRankedFeatureVector (InstanceList ilist)
{
return new InfoGain (ilist);
}
}
}
| 5,927 | 33.465116 | 110 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/PartiallyRankedFeatureVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/** Just like RankedFeatureVector, only NaNs are allowed and are unranked.
@author Jerod Weinman <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import cc.mallet.types.Label;
import cc.mallet.types.RankedFeatureVector;
public class PartiallyRankedFeatureVector extends RankedFeatureVector
{
private static final int SORTINIT = -1;
int numRanked = -1;
public PartiallyRankedFeatureVector (Alphabet dict, int[] indices,
double[] values)
{
super (dict, indices, values);
}
public PartiallyRankedFeatureVector (Alphabet dict, double[] values)
{
super (dict, values);
}
public PartiallyRankedFeatureVector (Alphabet dict, DenseVector v)
{
this (dict, v.values);
}
public PartiallyRankedFeatureVector (Alphabet dict,
AugmentableFeatureVector v)
{
super (dict, v );
}
public PartiallyRankedFeatureVector (Alphabet dict, SparseVector v)
{
super (dict, v );
}
public int numRanked () {
if (numRanked == -1)
{
numRanked = 0;
for (int i=0; i<values.length ; i++) {
if (!Double.isNaN(values[i]))
numRanked++;
}
}
return numRanked;
}
protected void setRankOrder ( int extent, boolean reset)
{
int sortExtent;
// Set the number of cells to sort, making sure we don't go past the max
// Sorting n-1 sorts the whole array.
sortExtent = (extent >= values.length) ? values.length - 1: extent;
if (sortExtent>=numRanked())
return;
if (sortedTo == SORTINIT || reset) { // reinitialize and sort
this.rankOrder = new int[values.length];
for (int i = 0; i < rankOrder.length; i++) {
rankOrder[i] = i;
}
}
// Selection sort
double max, front, next;
int maxIndex;
for (int i = sortedTo+1 ; i<=sortExtent ; i++ ) {
front = values[rankOrder[i]];
if (Double.isNaN( front ) )
max = Double.NEGATIVE_INFINITY;
else
max = front;
maxIndex = i;
for (int j=sortedTo+1 ; j<rankOrder.length ; j++ ) {
next = values[rankOrder[j]];
if (!Double.isNaN(next) && next>max )
{
max = next;
maxIndex = j;
}
}
// swap
int r = rankOrder[maxIndex];
rankOrder[maxIndex] = rankOrder[i];
rankOrder[i] = r;
sortedTo = i;
}
}
public interface Factory
{
public PartiallyRankedFeatureVector newPartiallyRankedFeatureVector
(InstanceList ilist, LabelVector[] posteriors);
}
public interface PerLabelFactory
{
public PartiallyRankedFeatureVector[] newPartiallyRankedFeatureVectors
(InstanceList ilist, LabelVector[] posteriors);
}
}
| 2,966 | 20.656934 | 87 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/ExpGain.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
The "gain" obtained by adding a feature to a conditional exponential model.
Based on the joint exponential model in Della Pietra, Della Pietra & Lafferty, 1997.
We smooth using a Gaussian prior.
Note that we use Math.log(), not log-base-2, so the units are not "bits".
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.logging.*;
import java.io.*;
import cc.mallet.classify.Classification;
import cc.mallet.util.MalletLogger;
public class ExpGain extends RankedFeatureVector
{
private static Logger logger = MalletLogger.getLogger(ExpGain.class.getName());
// ExpGain of a feature, f, is defined in terms of MaxEnt-type feature+class "Feature"s, F,
// F = f,c
// ExpGain of a Feature, F, is
// G(F) = KL(p~[C]||q[C]) - KL(p~[C]||q_F[C])
// where p~[] is the empirical distribution, according to the true class label distribution
// and q[] is the distribution from the (imperfect) classifier
// and q_F[] is the distribution from the (imperfect) classifier with F added
// and F's weight adjusted (but none of the other weights adjusted)
// ExpGain of a feature,f, is
// G(f) = sum_c G(f,c)
// It would be more accurate to return a gain number for each "feature/class" combination,
// but here we simply return the gain(feature) = \sum_{class} gain(feature,class).
// xxx Not ever used. Remove them.
boolean usingHyperbolicPrior = false;
double hyperbolicSlope = 0.2;
double hyperbolicSharpness = 10.0;
private static double[] calcExpGains (InstanceList ilist, LabelVector[] classifications,
double gaussianPriorVariance)
{
int numInstances = ilist.size();
int numClasses = ilist.getTargetAlphabet().size();
int numFeatures = ilist.getDataAlphabet().size();
assert (ilist.size() > 0);
// Notation from Della Pietra & Lafferty 1997, p.4
// "p~"
double[][] p = new double[numClasses][numFeatures];
// "q"
double[][] q = new double[numClasses][numFeatures];
// "alpha", the weight of the new feature
double[][] alphas = new double[numClasses][numFeatures];
int fli; // feature location index
double flv; // feature location value
logger.info ("Starting klgains, #instances="+numInstances);
double trueLabelWeightSum = 0;
double modelLabelWeightSum = 0;
// Calculate p~[f] and q[f]
for (int i = 0; i < numInstances; i++) {
assert (classifications[i].getLabelAlphabet() == ilist.getTargetAlphabet());
Instance inst = ilist.get(i);
Labeling labeling = inst.getLabeling ();
FeatureVector fv = (FeatureVector) inst.getData ();
//double instanceWeight = ilist.getInstanceWeight(i);
// The code below relies on labelWeights summing to 1 over all labels!
double perInstanceModelLabelWeight = 0;
for (int li = 0; li < numClasses; li++) {
double trueLabelWeight = labeling.value (li);
double modelLabelWeight = classifications[i].value(li);
trueLabelWeightSum += trueLabelWeight;
modelLabelWeightSum += modelLabelWeight;
perInstanceModelLabelWeight += modelLabelWeight;
//if (i < 500) System.out.println ("i="+i+" li="+li+" true="+trueLabelWeight+" model="+modelLabelWeight);
if (trueLabelWeight == 0 && modelLabelWeight == 0)
continue;
for (int fl = 0; fl < fv.numLocations(); fl++) {
fli = fv.indexAtLocation(fl);
assert (fv.valueAtLocation(fl) == 1.0);
					// xxx Note that we are not attending to instanceWeight here!
//p[li][fli] += trueLabelWeight * instanceWeight / (numInstances+1);
//q[li][fli] += modelLabelWeight * instanceWeight / (numInstances+1);
p[li][fli] += trueLabelWeight;
q[li][fli] += modelLabelWeight;
}
}
assert (Math.abs (perInstanceModelLabelWeight - 1.0) < 0.001);
}
assert (Math.abs (trueLabelWeightSum/numInstances - 1.0) < 0.001)
: "trueLabelWeightSum should be 1.0, it was "+trueLabelWeightSum;
assert (Math.abs (modelLabelWeightSum/numInstances - 1.0) < 0.001)
: "modelLabelWeightSum should be 1.0, it was "+modelLabelWeightSum;
/*
double psum = 0;
double qsum = 0;
for (int i = 0; i < numClasses; i++)
for (int j = 0; j < numFeatures; j++) {
psum += p[i][j];
qsum += q[i][j];
}
assert (Math.abs(psum - 1.0) < 0.0001) : "psum not 1.0! psum="+psum+" qsum="+qsum;
assert (Math.abs(qsum - 1.0) < 0.0001) : "qsum not 1.0! psum="+psum+" qsum="+qsum;
*/
// Determine the alphas
		// We can't do it in closed form as in the Della Pietra paper, because what
		// we have here is a conditional MaxEnt model.
// So we do it by Newton-Raphson...
// ...initializing by the broken, inappropriate joint-case closed-form solution:
//for (int i = 0; i < numClasses; i++)
//for (int j = 0; j < numFeatures; j++)
//alphas[i][j] = Math.log ( (p[i][j]*(1.0-q[i][j])) / (q[i][j]*(1.0-p[i][j])) );
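		// (Each Newton-Raphson step below applies alpha <- alpha - dalpha/ddalpha,
		// falling back to bisection between alphaMin and alphaMax whenever the
		// proposed step would jump outside the bracket established so far.)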
double[][] dalphas = new double[numClasses][numFeatures]; // first derivative
double[][] alphaChangeOld = new double[numClasses][numFeatures]; // change in alpha, last iteration
		double[][] alphaMax = new double[numClasses][numFeatures]; // upper bound on alpha found so far
		double[][] alphaMin = new double[numClasses][numFeatures]; // lower bound on alpha found so far
double[][] ddalphas = new double[numClasses][numFeatures];// second derivative
for (int i = 0; i < numClasses; i++)
for (int j = 0; j < numFeatures; j++) {
alphaMax[i][j] = Double.POSITIVE_INFINITY;
alphaMin[i][j] = Double.NEGATIVE_INFINITY;
}
double maxAlphachange = 0;
double maxDalpha = 99;
int maxNewtonSteps = 50; // xxx Change to more?
// alphas[][] are initialized to zero
for (int newton = 0; maxDalpha > 1.0E-8 && newton < maxNewtonSteps; newton++) {
//System.out.println ("Newton iteration "+newton);
if (false /*usingHyperbolicPrior*/) {
for (int i = 0; i < numClasses; i++)
for (int j = 0; j < numFeatures; j++) {
dalphas[i][j] = p[i][j] - (alphas[i][j] / gaussianPriorVariance);
ddalphas[i][j] = -1 / gaussianPriorVariance;
}
} else {
// Gaussian prior
for (int i = 0; i < numClasses; i++)
for (int j = 0; j < numFeatures; j++) {
dalphas[i][j] = p[i][j] - (alphas[i][j] / gaussianPriorVariance);
ddalphas[i][j] = -1 / gaussianPriorVariance;
}
}
for (int i = 0; i < ilist.size(); i++) {
assert (classifications[i].getLabelAlphabet() == ilist.getTargetAlphabet());
Instance inst = ilist.get(i);
Labeling labeling = inst.getLabeling ();
FeatureVector fv = (FeatureVector) inst.getData ();
// xxx This assumes binary-valued features. What about "tied" weights?
for (int fl = 0; fl < fv.numLocations(); fl++) {
fli = fv.indexAtLocation(fl);
for (int li = 0; li < numClasses; li++) {
double modelLabelWeight = classifications[i].value(li);
double expalpha = Math.exp (alphas[li][fli]);
double numerator = modelLabelWeight * expalpha;
double denominator = numerator + (1.0 - modelLabelWeight);
dalphas[li][fli] -= numerator / denominator;
ddalphas[li][fli] += ((numerator*numerator) / (denominator*denominator)
- (numerator/denominator));
}
}
}
// We now now first- and second-derivative for this newton step
// Run tests on the alphas and their derivatives, and do a newton step
double alphachange, newalpha, oldalpha;
maxAlphachange = maxDalpha = 0;
for (int i = 0; i < numClasses; i++)
for (int j = 0; j < numFeatures; j++) {
alphachange = - (dalphas[i][j] / ddalphas[i][j]);
if (p[i][j] == 0 && q[i][j] == 0)
continue;
else if (false && (i*numFeatures+j) % (numClasses*numFeatures/2000) == 0
|| Double.isNaN(alphas[i][j]) || Double.isNaN(alphachange))
// Print just a sampling of them...
logger.info ("alpha["+i+"]["+j+"]="+alphas[i][j]+
" p="+p[i][j]+
" q="+q[i][j]+
" dalpha="+dalphas[i][j]+
" ddalpha="+ddalphas[i][j]+
" alphachange="+alphachange+
" min="+alphaMin[i][j]+
" max="+alphaMax[i][j]);
if (Double.isNaN(alphas[i][j]) || Double.isNaN(dalphas[i][j]) || Double.isNaN(ddalphas[i][j])
|| Double.isInfinite(alphas[i][j]) || Double.isInfinite(dalphas[i][j]) || Double.isInfinite(ddalphas[i][j]))
alphachange = 0;
// assert (!Double.isNaN(alphas[i][j]));
// assert (!Double.isNaN(dalphas[i][j]));
// assert (!Double.isNaN(ddalphas[i][j]));
oldalpha = alphas[i][j];
// xxx assert (ddalphas[i][j] <= 0);
//assert (Math.abs(alphachange) < 100.0) : alphachange; // xxx arbitrary?
// Trying to prevent a cycle
if (Math.abs(alphachange + alphaChangeOld[i][j]) / Math.abs(alphachange) < 0.01)
newalpha = alphas[i][j] + alphachange / 2;
else
newalpha = alphas[i][j] + alphachange;
if (alphachange < 0 && alphaMax[i][j] > alphas[i][j]) {
//System.out.println ("Updating alphaMax["+i+"]["+j+"] = "+alphas[i][j]);
alphaMax[i][j] = alphas[i][j];
}
if (alphachange > 0 && alphaMin[i][j] < alphas[i][j]) {
//System.out.println ("Updating alphaMin["+i+"]["+j+"] = "+alphas[i][j]);
alphaMin[i][j] = alphas[i][j];
}
if (newalpha <= alphaMax[i][j] && newalpha >= alphaMin[i][j])
// Newton wants to jump to a point inside the boundaries; let it
alphas[i][j] = newalpha;
else {
// Newton wants to jump to a point outside the boundaries; bisect instead
assert (alphaMax[i][j] != Double.POSITIVE_INFINITY);
assert (alphaMin[i][j] != Double.NEGATIVE_INFINITY);
alphas[i][j] = alphaMin[i][j] + (alphaMax[i][j] - alphaMin[i][j]) / 2;
//System.out.println ("Newton tried to exceed bounds; bisecting. dalphas["+i+"]["+j+"]="+dalphas[i][j]+" alphaMin="+alphaMin[i][j]+" alphaMax="+alphaMax[i][j]);
}
alphachange = alphas[i][j] - oldalpha;
if (Math.abs(alphachange) > maxAlphachange)
maxAlphachange = Math.abs (alphachange);
if (Math.abs (dalphas[i][j]) > maxDalpha)
maxDalpha = Math.abs (dalphas[i][j]);
alphaChangeOld[i][j] = alphachange;
}
logger.info ("After "+newton+" Newton iterations, maximum alphachange="+maxAlphachange+
" dalpha="+maxDalpha);
}
// Allow some memory to be freed
//q = null;
ddalphas = dalphas = alphaChangeOld = alphaMin = alphaMax = null;
// "q[e^{\alpha g}]", p.4
//System.out.println ("Calculating qeag...");
// Note that we are using a gaussian prior, so we don't multiply by (1/numInstances)
double[][] qeag = new double[numClasses][numFeatures];
for (int i = 0; i < ilist.size(); i++) {
assert (classifications[i].getLabelAlphabet() == ilist.getTargetAlphabet());
Instance inst = ilist.get(i);
Labeling labeling = inst.getLabeling ();
FeatureVector fv = (FeatureVector) inst.getData ();
int fvMaxLocation = fv.numLocations()-1;
for (int li = 0; li < numClasses; li++) {
double modelLabelWeight = classifications[i].value(li);
// Following line now done before outside of loop over instances
// for (int fi = 0; fi < numFeatures; fi++) qeag[li][fi] += modelLabelWeight; // * 1.0;
for (int fl = 0; fl < fv.numLocations(); fl++) {
fli = fv.indexAtLocation(fl);
// When the value of this feature "g" is zero, a value of 1.0 should be included
// in the expectation; we'll actually add all these at the end (pre-"assuming"
// that all features have value zero). Here we subtract the "assumed"
// modelLabelWeight, and put in the true value based on non-zero valued feature "g".
qeag[li][fli] += Math.log (modelLabelWeight * Math.exp (alphas[li][fli]) + (1-modelLabelWeight));
}
}
}
//System.out.println ("Calculating klgain values...");
double[] klgains = new double[numFeatures];
double klgainIncr, alpha;
for (int i = 0; i < numClasses; i++)
for (int j = 0; j < numFeatures; j++) {
assert (!Double.isInfinite(alphas[i][j]));
alpha = alphas[i][j];
if (alpha == 0)
continue;
klgainIncr = (alpha * p[i][j]) - qeag[i][j] - (alpha*alpha/(2*gaussianPriorVariance));
if (klgainIncr < 0) {
if (false)
logger.info ("WARNING: klgainIncr["+i+"]["+j+"]="+klgainIncr+
" alpha="+alphas[i][j]+
" feature="+ilist.getDataAlphabet().lookupObject(j)+
" class="+ilist.getTargetAlphabet().lookupObject(i));
} else
klgains[j] += klgainIncr;
}
if (false) {
logger.info ("klgains.length="+klgains.length);
for (int j = 0; j < numFeatures; j++) {
if (j % (numFeatures/100) == 0) {
for (int i = 0; i < numClasses; i++) {
logger.info ("c="+i+" p["+ilist.getDataAlphabet().lookupObject(j)+"] = "+p[i][j]);
logger.info ("c="+i+" q["+ilist.getDataAlphabet().lookupObject(j)+"] = "+q[i][j]);
logger.info ("c="+i+" alphas["+ilist.getDataAlphabet().lookupObject(j)+"] = "+alphas[i][j]);
logger.info ("c="+i+" qeag["+ilist.getDataAlphabet().lookupObject(j)+"] = "+qeag[i][j]);
}
logger.info ("klgains["+ilist.getDataAlphabet().lookupObject(j)+"] = "+klgains[j]);
}
}
}
return klgains;
}
public ExpGain (InstanceList ilist, LabelVector[] classifications, double gaussianPriorVariance)
{
super (ilist.getDataAlphabet(), calcExpGains (ilist, classifications, gaussianPriorVariance));
}
private static LabelVector[] getLabelVectorsFromClassifications (Classification[] c)
{
LabelVector[] ret = new LabelVector[c.length];
for (int i = 0; i < c.length; i++)
ret[i] = c[i].getLabelVector();
return ret;
}
public ExpGain (InstanceList ilist, Classification[] classifications, double gaussianPriorVariance)
{
super (ilist.getDataAlphabet(),
calcExpGains (ilist, getLabelVectorsFromClassifications(classifications), gaussianPriorVariance));
}
public static class Factory implements RankedFeatureVector.Factory
{
LabelVector[] classifications;
double gaussianPriorVariance = 10.0;
public Factory (LabelVector[] classifications)
{
this.classifications = classifications;
}
public Factory (LabelVector[] classifications,
double gaussianPriorVariance)
{
this.classifications = classifications;
this.gaussianPriorVariance = gaussianPriorVariance;
}
public RankedFeatureVector newRankedFeatureVector (InstanceList ilist)
{
assert (ilist.getTargetAlphabet() == classifications[0].getAlphabet());
return new ExpGain (ilist, classifications, gaussianPriorVariance);
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeInt(classifications.length);
for (int i = 0; i < classifications.length; i++)
out.writeObject(classifications[i]);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
int n = in.readInt();
this.classifications = new LabelVector[n];
for (int i = 0; i < n; i++)
this.classifications[i] = (LabelVector)in.readObject();
}
}
}
| 15,602 | 41.17027 | 166 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/FeatureCounts.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
The number of instances in which each feature occurs.
Note that we aren't attending to the feature's value, and MALLET doesn't currently
have any support at all for categorical features.
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
public class FeatureCounts extends RankedFeatureVector
{
// increment by 1 for each instance that has the feature, ignoring the feature's value
static boolean countInstances = true;
private static double[] calcFeatureCounts (InstanceList ilist)
{
int numInstances = ilist.size();
int numClasses = ilist.getTargetAlphabet().size();
int numFeatures = ilist.getDataAlphabet().size();
double[] counts = new double[numFeatures];
double count;
for (int i = 0; i < ilist.size(); i++) {
Instance inst = ilist.get(i);
if (!(inst.getData() instanceof FeatureVector))
throw new IllegalArgumentException ("Currently only handles FeatureVector data");
FeatureVector fv = (FeatureVector) inst.getData ();
if (ilist.getInstanceWeight(i) == 0)
continue;
for (int j = 0; j < fv.numLocations(); j++) {
if (countInstances)
counts[fv.indexAtLocation(j)] += 1;
else
counts[fv.indexAtLocation(j)] += fv.valueAtLocation(j);
}
}
return counts;
}
public FeatureCounts (InstanceList ilist)
{
super (ilist.getDataAlphabet(), calcFeatureCounts (ilist));
}
public FeatureCounts (Alphabet vocab, double[] counts)
{
super (vocab, counts);
}
public static class Factory implements RankedFeatureVector.Factory
{
public Factory ()
{
}
public RankedFeatureVector newRankedFeatureVector (InstanceList ilist)
{
return new FeatureCounts (ilist);
}
}
}
| 2,165 | 28.27027 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Sequence.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
// Immutable
public interface Sequence<E>
{
public int size ();
public E get (int index);
}
| 644 | 25.875 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Vector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.Arrays;
import java.util.HashSet;
import java.util.HashMap;
import cc.mallet.types.Alphabet;
import cc.mallet.types.Matrix;
import cc.mallet.util.PropertyList;
// Could also be called by convention "Matrix1"
@Deprecated // Rarely used, and should be removed -akm 1/2008
public interface Vector extends ConstantMatrix
{
public double value (int index);
//public void setValue (int index, double value);
}
| 976 | 28.606061 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/AlphabetCarrying.java | package cc.mallet.types;
/** An interface for objects that contain one or more Alphabets.
* <p>
* The primary kind of type checking among MALLET objects such as Instances, InstanceLists, Classifiers, etc is
* by checking that their Alphabets match. */
public interface AlphabetCarrying {
Alphabet getAlphabet();
Alphabet[] getAlphabets();
//boolean alphabetsMatch (AlphabetCarrying object); //Now you should simply call the static method Alphabet.alphabetsMatch().
}
| 477 | 38.833333 | 127 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Matrixn.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types; // Generated package name
/**
* Implementation of Matrix that allows arbitrary
* number of dimensions. This implementation
* simply uses a flat array.
*
* This also provides static utilities for doing
* arbitrary-dimensional array indexing (see
* {@link #singleIndex}, {@link #singleToIndices}).
*
* Created: Tue Sep 16 14:52:37 2003
*
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version $Id: Matrixn.java,v 1.1 2007/10/22 21:37:39 mccallum Exp $
*/
public class Matrixn extends DenseMatrix implements Cloneable {
int numDimensions;
int[] sizes;
/**
* Create a 1-d matrix with the given values.
*/
public Matrixn(double[] vals) {
numDimensions = 1;
sizes = new int[1];
sizes [0] = vals.length;
values = (double[]) vals.clone();
}
/**
* Create a matrix with the given dimensions.
*
* @param szs An array containing the maximum for
* each dimension.
*/
public Matrixn (int szs[]) {
numDimensions = szs.length;
// sizes = (int[])szs.clone();
sizes = szs;
int total = 1;
for (int j = 0; j < numDimensions; j++) {
total *= sizes [j];
}
values = new double [total];
}
/**
* Create a matrix with the given dimensions and
* the given values.
*
* @param szs An array containing the maximum for
* each dimension.
* @param vals A flat array of the entries of the
* matrix, in row-major order.
*/
public Matrixn (int[] szs, double[] vals) {
numDimensions = szs.length;
sizes = (int[])szs.clone();
values = (double[])vals.clone();
}
public int getNumDimensions () { return numDimensions; };
public int getDimensions (int [] szs) {
for ( int i = 0; i < numDimensions; i++ ) {
szs [i] = this.sizes [i];
}
return numDimensions;
}
public double value (int[] indices) {
return values [singleIndex (indices)];
}
public void setValue (int[] indices, double value) {
values [singleIndex (indices)] = value;
}
public ConstantMatrix cloneMatrix () {
/* The Matrixn constructor will clone the arrays. */
return new Matrixn (sizes, values);
}
public Object clone () {
return cloneMatrix();
}
public int singleIndex (int[] indices)
{
return singleIndex (sizes, indices);
}
// This is public static so it will be useful as a general
// dereferencing utility for multidimensional arrays.
public static int singleIndex (int[] szs, int[] indices)
{
int idx = 0;
for ( int dim = 0; dim < indices.length; dim++ ) {
idx = (idx * szs[dim]) + indices [dim];
}
return idx;
}
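  // Worked example (illustrative): with szs = {2, 3, 4} and indices = {1, 2, 3},
  // the loop computes ((0*2 + 1)*3 + 2)*4 + 3 = 23, i.e. standard row-major order.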
// NOTE: Cut-n-pasted to other singleToIndices method!!
public void singleToIndices (int single, int[] indices) {
/* must be a better way to do this... */
int size = 1;
for (int i = 0; i < numDimensions; i++) {
size *= sizes[i];
}
for ( int dim = 0; dim < numDimensions; dim++) {
size /= sizes [dim];
indices [dim] = single / size;
single = single % size;
}
}
/** Just a utility function for arbitrary-dimensional matrix
* dereferencing.
*/
// NOTE: Cut-n-paste from other singleToIndices method!!
public static void singleToIndices (int single, int[] indices, int[] szs) {
int numd = indices.length;
assert numd == szs.length;
/* must be a better way to do this... */
int size = 1;
for (int i = 0; i < numd; i++) {
size *= szs[i];
}
for ( int dim = 0; dim < numd; dim++) {
size /= szs [dim];
indices [dim] = single / size;
single = single % size;
}
}
public boolean equals (Object o) {
if (o instanceof Matrixn) {
/* This could be extended to work for all Matrixes. */
Matrixn m2 = (Matrixn) o;
			return
				(numDimensions == m2.numDimensions) &&
				java.util.Arrays.equals (sizes, m2.sizes) &&
				java.util.Arrays.equals (values, m2.values);
} else {
return false;
}
}
/**
* Returns a one-dimensional array representation of the matrix.
* Caller must not modify the return value.
* @return An array of the values where index 0 is the major index, etc.
*/
public double[] toArray () {
return values;
}
/* Test array referencing and dereferencing */
public static void main(String[] args) {
double m1[] = new double[] { 1.0, 2.0, 3.0, 4.0 };
int idx1[] = new int[1];
Matrixn a = new Matrixn (m1);
System.out.println("Checking 1-D case");
a.singleToIndices (3, idx1);
System.out.println(idx1[0]);
System.out.println (a.singleIndex (idx1));
System.out.println ("Checking 2-D case");
int sizes[] = new int[] { 2, 3 };
m1 = new double [6];
for (int i = 0; i < 6; i++) {
m1 [i] = 2.0 * i;
}
a = new Matrixn (sizes, m1);
idx1 = new int [2];
a.singleToIndices (5, idx1);
System.out.println("5 => (" + idx1[0] + ", " + idx1[1] + ") => " +
a.singleIndex (idx1) );
System.out.println(a.value (idx1));
System.out.println("Checking 3-D case");
sizes = new int[] { 2, 3, 4 };
idx1 = new int[3];
m1 = new double [24];
for (int i = 0; i < 24; i++) {
m1 [i] = 2.0 * i;
}
a = new Matrixn (sizes, m1);
a.singleToIndices (21, idx1);
System.out.println ("21 => (" + idx1[0] + " " + idx1[1] + " " +
idx1[2] + ") =>" + a.singleIndex (idx1));
System.out.println(a.value (idx1));
}
// serialization garbage
private static final long serialVersionUID = 7963668115823191655L;
}
| 5,815 | 26.695238 | 76 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Alphabet.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.ArrayList;
import java.io.*;
import java.util.Iterator;
import java.util.HashMap;
import java.rmi.dgc.VMID;
/**
* A mapping between integers and objects where the mapping in each
* direction is efficient. Integers are assigned consecutively, starting
* at zero, as objects are added to the Alphabet. Objects can not be
* deleted from the Alphabet and thus the integers are never reused.
* <p>
* The most common use of an alphabet is as a dictionary of feature names
* associated with a {@link cc.mallet.types.FeatureVector} in an
* {@link cc.mallet.types.Instance}. In a simple document
* classification usage,
* each unique word in a document would be a unique entry in the Alphabet
* with a unique integer associated with it. FeatureVectors rely on
* the integer part of the mapping to efficiently represent the subset of
* the Alphabet present in the FeatureVector.
* @see FeatureVector
* @see Instance
* @see cc.mallet.pipe.Pipe
*/
public class Alphabet implements Serializable
{
gnu.trove.TObjectIntHashMap map;
ArrayList entries;
boolean growthStopped = false;
Class entryClass = null;
	VMID instanceId = new VMID();  //used in readResolve to identify persistent instances
public Alphabet (int capacity, Class entryClass)
{
this.map = new gnu.trove.TObjectIntHashMap (capacity);
this.entries = new ArrayList (capacity);
this.entryClass = entryClass;
// someone could try to deserialize us into this image (e.g., by RMI). Handle this.
deserializedEntries.put (instanceId, this);
}
public Alphabet (Class entryClass)
{
this (8, entryClass);
}
public Alphabet (int capacity)
{
this (capacity, null);
}
public Alphabet ()
{
this (8, null);
}
public Alphabet (Object[] entries) {
this (entries.length);
for (Object entry : entries)
this.lookupIndex(entry);
}
public Object clone ()
{
//try {
// Wastes effort, because we over-write ivars we create
Alphabet ret = new Alphabet ();
ret.map = (gnu.trove.TObjectIntHashMap) map.clone();
ret.entries = (ArrayList) entries.clone();
ret.growthStopped = growthStopped;
ret.entryClass = entryClass;
return ret;
//} catch (CloneNotSupportedException e) {
//e.printStackTrace();
//throw new IllegalStateException ("Couldn't clone InstanceList Vocabuary");
//}
}
	/** Return -1 if entry isn't present and addIfNotPresent is false (or if growth has been stopped). */
public int lookupIndex (Object entry, boolean addIfNotPresent)
{
if (entry == null)
throw new IllegalArgumentException ("Can't lookup \"null\" in an Alphabet.");
if (entryClass == null)
entryClass = entry.getClass();
else
// Insist that all entries in the Alphabet are of the same
// class. This may not be strictly necessary, but will catch a
// bunch of easily-made errors.
if (entry.getClass() != entryClass)
throw new IllegalArgumentException ("Non-matching entry class, "+entry.getClass()+", was "+entryClass);
int retIndex = -1;
if (map.containsKey( entry )) {
retIndex = map.get( entry );
}
else if (!growthStopped && addIfNotPresent) {
retIndex = entries.size();
map.put (entry, retIndex);
entries.add (entry);
}
return retIndex;
}
public int lookupIndex (Object entry)
{
return lookupIndex (entry, true);
}
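	/** Minimal usage sketch (added for illustration; not part of the original API):
	 *  shows the consecutive-integer mapping maintained by lookupIndex/lookupObject.
	 *  The entry strings are arbitrary examples. */
	private static void usageSketch ()
	{
		Alphabet dict = new Alphabet ();
		int catIndex = dict.lookupIndex ("cat");            // 0, added on first lookup
		int dogIndex = dict.lookupIndex ("dog");            // 1
		assert dict.lookupObject (catIndex).equals ("cat");
		assert dict.lookupIndex ("cat", false) == catIndex; // already present, not re-added
		assert dogIndex == catIndex + 1 && dict.size () == 2;
	}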
public Object lookupObject (int index)
{
return entries.get(index);
}
public Object[] toArray () {
return entries.toArray();
}
/**
* Returns an array containing all the entries in the Alphabet.
* The runtime type of the returned array is the runtime type of in.
* If in is large enough to hold everything in the alphabet, then it
* it used. The returned array is such that for all entries <tt>obj</tt>,
* <tt>ret[lookupIndex(obj)] = obj</tt> .
*/
public Object[] toArray (Object[] in) {
return entries.toArray (in);
}
// xxx This should disable the iterator's remove method...
public Iterator iterator () {
return entries.iterator();
}
public Object[] lookupObjects (int[] indices)
{
Object[] ret = new Object[indices.length];
for (int i = 0; i < indices.length; i++)
ret[i] = entries.get(indices[i]);
return ret;
}
/**
	 * Returns an array of the objects corresponding to the given indices.
* @param indices An array of indices to look up
* @param buf An array to store the returned objects in.
* @return An array of values from this Alphabet. The runtime type of the array is the same as buf
*/
public Object[] lookupObjects (int[] indices, Object[] buf)
{
for (int i = 0; i < indices.length; i++)
buf[i] = entries.get(indices[i]);
return buf;
}
public int[] lookupIndices (Object[] objects, boolean addIfNotPresent)
{
int[] ret = new int[objects.length];
for (int i = 0; i < objects.length; i++)
ret[i] = lookupIndex (objects[i], addIfNotPresent);
return ret;
}
public boolean contains (Object entry)
{
return map.contains (entry);
}
public int size ()
{
return entries.size();
}
public void stopGrowth ()
{
growthStopped = true;
}
public void startGrowth ()
{
growthStopped = false;
}
public boolean growthStopped ()
{
return growthStopped;
}
public Class entryClass ()
{
return entryClass;
}
/** Return String representation of all Alphabet entries, each
separated by a newline. */
public String toString()
{
StringBuffer sb = new StringBuffer();
for (int i = 0; i < entries.size(); i++) {
sb.append (entries.get(i).toString());
sb.append ('\n');
}
return sb.toString();
}
public void dump () { dump (System.out); }
public void dump (PrintStream out)
{
dump (new PrintWriter (new OutputStreamWriter (out), true));
}
public void dump (PrintWriter out)
{
for (int i = 0; i < entries.size(); i++) {
out.println (i+" => "+entries.get (i));
}
}
	/** Convenience method that can often implement alphabetsMatch in classes that implement the AlphabetCarrying interface. */
public static boolean alphabetsMatch (AlphabetCarrying object1, AlphabetCarrying object2) {
Alphabet[] a1 = object1.getAlphabets();
Alphabet[] a2 = object2.getAlphabets();
if (a1.length != a2.length) return false;
for (int i = 0; i < a1.length; i++) {
if (a1[i] == a2[i]) continue;
if (a1[i] == null || a2[i] == null) return false; // One is null, but the other isn't
if (! a1[i].equals(a2[i])) return false;
}
return true;
}
public VMID getInstanceId() { return instanceId;} // for debugging
public void setInstanceId(VMID id) { this.instanceId = id; }
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeInt (entries.size());
for (int i = 0; i < entries.size(); i++)
out.writeObject (entries.get(i));
out.writeBoolean (growthStopped);
out.writeObject (entryClass);
out.writeObject(instanceId);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
int size = in.readInt();
entries = new ArrayList (size);
map = new gnu.trove.TObjectIntHashMap (size);
for (int i = 0; i < size; i++) {
Object o = in.readObject();
map.put (o, i);
			entries.add (o);
}
growthStopped = in.readBoolean();
entryClass = (Class) in.readObject();
		if (version > 0) { // instance id added in version 1
instanceId = (VMID) in.readObject();
}
}
private transient static HashMap deserializedEntries = new HashMap();
/**
* This gets called after readObject; it lets the object decide whether
* to return itself or return a previously read in version.
* We use a hashMap of instanceIds to determine if we have already read
* in this object.
	 * @return This Alphabet, or a previously deserialized Alphabet with the same instance id.
* @throws ObjectStreamException
*/
public Object readResolve() throws ObjectStreamException {
Object previous = deserializedEntries.get(instanceId);
if (previous != null){
//System.out.println(" ***Alphabet ReadResolve:Resolving to previous instance. instance id= " + instanceId);
return previous;
}
if (instanceId != null){
deserializedEntries.put(instanceId, this);
}
//System.out.println(" *** Alphabet ReadResolve: new instance. instance id= " + instanceId);
return this;
}
}
| 8,801 | 27.764706 | 125 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/PropertyHolder.java | package cc.mallet.types;
import cc.mallet.util.PropertyList;
/**
* Author: saunders Created Nov 15, 2005 Copyright (C) Univ. of Massachusetts Amherst, Computer Science Dept.
*/
public interface PropertyHolder {
public void setProperty(String key, Object value);
public Object getProperty(String key);
public void setNumericProperty(String key, double value);
public double getNumericProperty(String key);
public PropertyList getProperties();
public void setProperties(PropertyList newProperties);
public boolean hasProperty(String key);
public void setFeatureValue (String key, double value);
public double getFeatureValue (String key);
public PropertyList getFeatures ();
public void setFeatures (PropertyList pl);
public FeatureVector toFeatureVector (Alphabet dict, boolean binary);
}
| 816 | 23.757576 | 109 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/InstanceList.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.ObjectStreamException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.logging.Logger;
import cc.mallet.pipe.FeatureSequence2FeatureVector;
import cc.mallet.pipe.Noop;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.SerialPipes;
import cc.mallet.pipe.Target2Label;
import cc.mallet.pipe.TokenSequence2FeatureSequence;
import cc.mallet.pipe.iterator.RandomTokenSequenceIterator;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.Randoms;
/**
A list of machine learning instances, typically used for training
or testing of a machine learning algorithm.
<p>
All of the instances in the list will have been passed through the
same {@link cc.mallet.pipe.Pipe}, and thus must also share the same data and target Alphabets.
InstanceList keeps a reference to the pipe and the two alphabets.
<p>
The most common way of adding instances to an InstanceList is through
	the <code>addThruPipe</code> methods.  Iterators over Instances are a way of mapping general
data sources into instances suitable for processing through a pipe.
	As each {@link cc.mallet.types.Instance} is pulled from the iterator, the InstanceList
copies the instance and runs the copy through its pipe (with resultant
destructive modifications) before saving the modified instance on its list.
This is the usual way in which instances are transformed by pipes.
<p>
InstanceList also contains methods for randomly generating lists of
feature vectors; splitting lists into non-overlapping subsets (useful
for test/train splits), and iterators for cross validation.
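   <p>
   A typical usage sketch (illustrative only; the particular pipes, data, and labels here are
   placeholders, not a prescribed configuration):
   <pre>
   Pipe pipe = new SerialPipes (new Pipe[] {
       new CharSequence2TokenSequence(),
       new TokenSequence2FeatureSequence(),
       new FeatureSequence2FeatureVector(),
       new Target2Label() });
   InstanceList ilist = new InstanceList (pipe);
   ilist.addThruPipe (new Instance ("some text", "someLabel", "doc1", null));
   InstanceList[] splits = ilist.split (new java.util.Random(1), new double[] {0.9, 0.1});
   </pre>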
@see Instance
@see Pipe
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
public class InstanceList extends ArrayList<Instance> implements Serializable, Iterable<Instance>, AlphabetCarrying
{
private static Logger logger = MalletLogger.getLogger(InstanceList.class.getName());
HashMap<Instance, Double> instWeights = null;
// This should never be set by a ClassifierTrainer, it should be used in conjunction with a Classifier's FeatureSelection
// Or perhaps it should be removed from here, and there should be a ClassifierTrainer.train(InstanceList, FeatureSelection) method.
FeatureSelection featureSelection = null;
FeatureSelection[] perLabelFeatureSelection = null;
Pipe pipe;
Alphabet dataAlphabet, targetAlphabet;
Class dataClass = null;
Class targetClass = null;
/**
* Construct an InstanceList having given capacity, with given default pipe.
* Typically Instances added to this InstanceList will have gone through the
* pipe (for example using instanceList.addThruPipe); but this is not required.
	 * This InstanceList will obtain its dataAlphabet and targetAlphabet from the pipe.
* It is required that all Instances in this InstanceList share these Alphabets.
* @param pipe The default pipe used to process instances added via the addThruPipe methods.
* @param capacity The initial capacity of the list; will grow further as necessary.
*/
// XXX not very useful, should perhaps be removed
public InstanceList (Pipe pipe, int capacity)
{
super(capacity);
this.pipe = pipe;
}
/**
* Construct an InstanceList with initial capacity of 10, with given default pipe.
* Typically Instances added to this InstanceList will have gone through the
* pipe (for example using instanceList.addThruPipe); but this is not required.
	 * This InstanceList will obtain its dataAlphabet and targetAlphabet from the pipe.
* It is required that all Instances in this InstanceList share these Alphabets.
* @param pipe The default pipe used to process instances added via the addThruPipe methods.
*/
public InstanceList (Pipe pipe)
{
this (pipe, 10);
}
/**
* Construct an InstanceList with initial capacity of 10, with a Noop default pipe.
* Used in those infrequent circumstances when Instances typically would not have further
* processing, and objects containing vocabularies are entered
* directly into the <code>InstanceList</code>; for example, the creation of a
* random <code>InstanceList</code> using <code>Dirichlet</code>s and
* <code>Multinomial</code>s.</p>
*
* @param dataAlphabet The vocabulary for added instances' data fields
* @param targetAlphabet The vocabulary for added instances' targets
*/
public InstanceList (Alphabet dataAlphabet, Alphabet targetAlphabet)
{
this (new Noop(dataAlphabet, targetAlphabet), 10);
this.dataAlphabet = dataAlphabet;
this.targetAlphabet = targetAlphabet;
}
private static class NotYetSetPipe extends Pipe {
public Instance pipe (Instance carrier) {
throw new UnsupportedOperationException (
"The InstanceList has yet to have its pipe set; "+
"this could happen by calling InstanceList.add(InstanceList)");
}
public Object readResolve () throws ObjectStreamException {
return notYetSetPipe;
}
private static final long serialVersionUID = 1;
}
static final Pipe notYetSetPipe = new NotYetSetPipe();
/** Creates a list that will have its pipe set later when its first Instance is added. */
@Deprecated // Pipe is never set if you use this constructor
public InstanceList ()
{
this (notYetSetPipe);
}
/**
* Creates a list consisting of randomly-generated
* <code>FeatureVector</code>s.
*/
// xxx Perhaps split these out into a utility class
public InstanceList (Randoms r,
// the generator of all random-ness used here
Dirichlet classCentroidDistribution,
// includes a Alphabet
double classCentroidAverageAlphaMean,
// Gaussian mean on the sum of alphas
double classCentroidAverageAlphaVariance,
// Gaussian variance on the sum of alphas
double featureVectorSizePoissonLambda,
double classInstanceCountPoissonLambda,
String[] classNames)
{
this (new SerialPipes (new Pipe[] {
new TokenSequence2FeatureSequence (),
new FeatureSequence2FeatureVector (),
new Target2Label()}));
//classCentroidDistribution.print();
Iterator<Instance> iter = new RandomTokenSequenceIterator (
r, classCentroidDistribution,
classCentroidAverageAlphaMean, classCentroidAverageAlphaVariance,
featureVectorSizePoissonLambda, classInstanceCountPoissonLambda,
classNames);
this.addThruPipe (iter);
}
private static Alphabet dictOfSize (int size)
{
Alphabet ret = new Alphabet ();
for (int i = 0; i < size; i++)
ret.lookupIndex ("feature"+i);
return ret;
}
private static String[] classNamesOfSize (int size)
{
String[] ret = new String[size];
for (int i = 0; i < size; i++)
ret[i] = "class"+i;
return ret;
}
public InstanceList (Randoms r, Alphabet vocab, String[] classNames, int meanInstancesPerLabel)
{
this (r, new Dirichlet(vocab, 2.0),
30, 0,
10, meanInstancesPerLabel, classNames);
}
public InstanceList (Randoms r, int vocabSize, int numClasses)
{
this (r, new Dirichlet(dictOfSize(vocabSize), 2.0),
30, 0,
10, 20, classNamesOfSize(numClasses));
}
public InstanceList shallowClone ()
{
InstanceList ret = new InstanceList (pipe, this.size());
for (int i = 0; i < this.size(); i++)
ret.add (get(i));
if (instWeights == null)
ret.instWeights = null;
else
ret.instWeights = (HashMap<Instance,Double>) instWeights.clone();
// Should we really be so shallow as to not make new copies of these following instance variables? -akm 1/2008
ret.featureSelection = featureSelection;
ret.perLabelFeatureSelection = perLabelFeatureSelection;
		ret.pipe = pipe;
ret.dataAlphabet = dataAlphabet;
ret.targetAlphabet = targetAlphabet;
ret.dataClass = dataClass;
ret.targetClass = targetClass;
return ret;
}
public Object clone ()
{
return shallowClone();
}
public InstanceList subList (int start, int end)
{
InstanceList other = this.cloneEmpty();
for (int i = start; i < end; i++) {
other.add (get (i));
}
return other;
}
	public InstanceList subList (double proportion)
	{
		if (proportion > 1.0)
			throw new IllegalArgumentException ("proportion must be <= 1.0");
		// Shuffle a shallow copy of this list, then return a new list containing
		// the first (proportion * size) instances of the shuffled order.
		InstanceList shuffled = this.shallowClone();
		shuffled.shuffle (new java.util.Random());
		InstanceList other = this.cloneEmpty();
		double limit = proportion * shuffled.size();
		for (int i = 0; i < limit; i++)
			other.add (shuffled.get(i));
		return other;
	}
/** Adds to this list every instance generated by the iterator,
* passing each one through this InstanceList's pipe. */
// TODO This method should be renamed addPiped(Iterator<Instance> ii)
public void addThruPipe (Iterator<Instance> ii)
{
//for debug
Iterator<Instance> pipedInstanceIterator = pipe.newIteratorFrom(ii);
while (pipedInstanceIterator.hasNext())
{
add (pipedInstanceIterator.next());
//System.out.println("Add instance " + pipedInstanceIterator.next().getName());
}
}
// gsc: method to add one instance at a time
/** Adds the input instance to this list, after passing it through the
* InstanceList's pipe.
* <p>
* If several instances are to be added then accumulate them in a List\<Instance\>
* and use <tt>addThruPipe(Iterator<Instance>)</tt> instead.
*/
public void addThruPipe(Instance inst)
{
addThruPipe(new SingleInstanceIterator(inst));
}
/** Constructs and appends an instance to this list, passing it through this
* list's pipe and assigning it the specified weight.
* @return <code>true</code>
* @deprecated Use trainingset.addThruPipe (new Instance(data,target,name,source)) instead.
*/
@Deprecated
public boolean add (Object data, Object target, Object name, Object source, double instanceWeight)
{
Instance inst = new Instance (data, target, name, source);
Iterator<Instance> ii = pipe.newIteratorFrom(new SingleInstanceIterator(inst));
if (ii.hasNext()) {
add (ii.next(), instanceWeight);
return true;
}
return false;
}
/** Constructs and appends an instance to this list, passing it through this
* list's pipe. Default weight is 1.0.
* @return <code>true</code>
* @deprecated Use trainingset.add (new Instance(data,target,name,source)) instead.
*/
@Deprecated
public boolean add (Object data, Object target, Object name, Object source)
{
return add (data, target, name, source, 1.0);
}
/** Appends the instance to this list without passing the instance through
* the InstanceList's pipe.
* The alphabets of this Instance must match the alphabets of this InstanceList.
* @return <code>true</code>
*/
public boolean add (Instance instance)
{
if (dataAlphabet == null)
dataAlphabet = instance.getDataAlphabet();
if (targetAlphabet == null)
targetAlphabet = instance.getTargetAlphabet();
if (!Alphabet.alphabetsMatch(this, instance)) {
// gsc
Alphabet data_alphabet = instance.getDataAlphabet();
Alphabet target_alphabet = instance.getTargetAlphabet();
StringBuilder sb = new StringBuilder();
sb.append("Alphabets don't match: ");
sb.append("Instance: [" + (data_alphabet == null ? null : data_alphabet.size()) + ", " +
(target_alphabet == null ? null : target_alphabet.size()) + "], ");
data_alphabet = this.getDataAlphabet();
target_alphabet = this.getTargetAlphabet();
sb.append("InstanceList: [" + (data_alphabet == null ? null : data_alphabet.size()) + ", " +
(target_alphabet == null ? null : target_alphabet.size()) + "]\n");
throw new IllegalArgumentException(sb.toString());
// throw new IllegalArgumentException ("Alphabets don't match: Instance: "+
// instance.getAlphabets()+" InstanceList: "+this.getAlphabets());
}
if (dataClass == null) {
dataClass = instance.data.getClass();
if (pipe != null && pipe.isTargetProcessing())
if (instance.target != null)
targetClass = instance.target.getClass();
}
// Once it is added to an InstanceList, generally-speaking, the Instance shouldn't change.
// There are exceptions, and for these you can instance.unlock(), then instance.lock() again.
instance.lock();
return super.add (instance);
}
/** Appends the instance to this list without passing it through this
* InstanceList's pipe, assigning it the specified weight.
* @return <code>true</code>
*/
public boolean add (Instance instance, double instanceWeight)
{
// Call the add method above and make sure we
// correctly handle adding the first instance to this list
boolean ret = this.add(instance);
if (!ret)
// If for some reason a subclass of InstanceList refuses to add this Instance, be sure not to do the rest.
return ret;
if (instanceWeight != 1.0) { // Default weight is 1.0 for everything not in the HashMap.
if (instWeights == null)
instWeights = new HashMap<Instance,Double>();
else if (instWeights.get(instance) != null)
throw new IllegalArgumentException ("You cannot add the same instance twice to an InstanceList when it has non-1.0 weight. "+
"Trying adding instance.shallowCopy() instead.");
instWeights.put(instance, instanceWeight);
}
return ret;
}
private void prepareToRemove (Instance instance) {
if (instWeights != null)
instWeights.remove(instance);
}
public Instance set (int index, Instance instance) {
prepareToRemove(get(index));
return super.set (index, instance);
}
public void add (int index, Instance element) {
throw new IllegalStateException ("Not yet implemented.");
}
public Instance remove (int index) {
prepareToRemove (get(index));
return super.remove(index);
}
public boolean remove (Instance instance) {
prepareToRemove (instance);
return super.remove(instance);
}
public boolean addAll (Collection<? extends Instance> instances) {
for (Instance instance : instances)
this.add (instance);
return true;
}
public boolean addAll(int index, Collection <? extends Instance> c) {
throw new IllegalStateException ("addAll(int,Collection) not supported by InstanceList.n");
}
public void clear() {
super.clear();
		if (instWeights != null)
			instWeights.clear();
// But retain all other instance variables.
}
@Deprecated // Remove this. It seems like too specialized behavior to be implemented here.
// Intentionally add some noise into the data.
// return the real random ratio
// added by Fuchun Peng, Sept. 2003
public double noisify(double ratio)
{
// ArrayList new_instances = new ArrayList( instances.size() );
assert(ratio >= 0 && ratio <= 1);
int instance_size = this.size();
int noise_instance_num = (int)( ratio * instance_size);
java.util.Random r = new java.util.Random ();
// System.out.println(noise_instance_num + "/" + instance_size);
// gsc: parameterizing...
List<Integer> randnumlist = new ArrayList<Integer>(noise_instance_num);
for(int i=0; i<noise_instance_num; i++){
int randIndex = r.nextInt(instance_size);
// System.out.println(i + ": " + randIndex );
Integer nn = new Integer(randIndex);
if(randnumlist.indexOf(nn) != -1){
i--;
}
else{
randnumlist.add(nn);
}
}
LabelAlphabet targets = (LabelAlphabet) pipe.getTargetAlphabet();
int realRandNum = 0;
// for(int i=0; i<randnumlist.size(); i++){
// int index = ((Integer)randnumlist.get(i)).intValue();
for (Integer index : randnumlist) {
Instance inst = get( index );
int randIndex = r.nextInt( targets.size() );
// System.out.println(i + ": " + index +": " + inst.getTarget().toString()
// + " : " + targets.lookupLabel(randIndex) );
String oldTargetStr = inst.getTarget().toString();
String newTargetStr = targets.lookupLabel(randIndex).toString();
if(!oldTargetStr.equals(newTargetStr)){
inst.unLock();
inst.setTarget(targets.lookupLabel(randIndex));
inst.lock();
realRandNum ++;
}
// System.out.println(i + ": " + index +": " + inst.getTarget().toString()
// + " : " + targets.lookupObject(randIndex) );
setInstance(index, inst);
}
double realRatio = (double)realRandNum/instance_size;
return realRatio;
}
public InstanceList cloneEmpty () {
return cloneEmptyInto (new InstanceList (pipe));
}
// A precursor to cloning subclasses of InstanceList
protected InstanceList cloneEmptyInto (InstanceList ret)
{
		ret.instWeights = null;  // Don't copy the weights; the returned list is empty.
// xxx Should the featureSelection and perLabel... be cloned?
// Note that RoostingTrainer currently depends on not cloning its splitting.
ret.featureSelection = this.featureSelection;
ret.perLabelFeatureSelection = this.perLabelFeatureSelection;
ret.dataClass = this.dataClass;
ret.targetClass = this.targetClass;
ret.dataAlphabet = this.dataAlphabet;
ret.targetAlphabet = this.targetAlphabet;
return ret;
}
public void shuffle (java.util.Random r) {
Collections.shuffle (this, r);
}
/**
* Shuffles the elements of this list among several smaller lists.
* @param proportions A list of numbers (not necessarily summing to 1) which,
* when normalized, correspond to the proportion of elements in each returned
* sublist. This method (and all the split methods) do not transfer the Instance
* weights to the resulting InstanceLists.
* @param r The source of randomness to use in shuffling.
* @return one <code>InstanceList</code> for each element of <code>proportions</code>
*/
public InstanceList[] split (java.util.Random r, double[] proportions) {
InstanceList shuffled = this.shallowClone();
shuffled.shuffle (r);
return shuffled.splitInOrder(proportions);
}
public InstanceList[] split (double[] proportions) {
return split (new java.util.Random(System.currentTimeMillis()), proportions);
}
/** Chops this list into several sequential sublists.
* @param proportions A list of numbers corresponding to the proportion of
* elements in each returned sublist. If not already normalized to sum to 1.0, it will be normalized here.
* @return one <code>InstanceList</code> for each element of <code>proportions</code>
*/
public InstanceList[] splitInOrder (double[] proportions) {
InstanceList[] ret = new InstanceList[proportions.length];
double maxind[] = proportions.clone();
MatrixOps.normalize(maxind);
for (int i = 0; i < maxind.length; i++) {
ret[i] = this.cloneEmpty(); // Note that we are passing on featureSelection here.
if (i > 0)
maxind[i] += maxind[i-1];
}
for (int i = 0; i < maxind.length; i++) {
// Fill maxind[] with the highest instance index to go in each corresponding returned InstanceList
maxind[i] = Math.rint (maxind[i] * this.size());
}
for (int i = 0, j = 0; i < size(); i++) {
// This gives a slight bias toward putting an extra instance in the last InstanceList.
while (i >= maxind[j] && j < ret.length)
j++;
ret[j].add(this.get(i));
}
return ret;
}
public InstanceList[] splitInOrder (int[] counts) {
InstanceList[] ret = new InstanceList[counts.length];
		// Will leave unused instances if sum of counts[] != this.size()!
int idx = 0;
for (int num = 0; num < counts.length; num++){
ret[num] = cloneEmpty();
for (int i = 0; i < counts[num]; i++){
ret[num].add (get(idx)); // Transfer weights?
idx++;
}
}
return ret;
}
/** Returns a pair of new lists such that the first list in the pair contains
* every <code>m</code>th element of this list, starting with the first.
* The second list contains all remaining elements.
*/
public InstanceList[] splitInTwoByModulo (int m)
{
InstanceList[] ret = new InstanceList[2];
ret[0] = this.cloneEmpty();
ret[1] = this.cloneEmpty();
for (int i = 0; i < this.size(); i++) {
if (i % m == 0)
ret[0].add (this.get(i));
else
ret[1].add (this.get(i));
}
return ret;
}
public InstanceList sampleWithReplacement (java.util.Random r, int numSamples)
{
InstanceList ret = this.cloneEmpty();
for (int i = 0; i < numSamples; i++)
ret.add (this.get(r.nextInt(this.size())));
return ret;
}
/**
* Returns an <code>InstanceList</code> of the same size, where the instances come from the
* random sampling (with replacement) of this list using the instance weights.
* The new instances all have their weights set to one.
*/
// added by Gary - [email protected]
@Deprecated
// Move to InstanceListUtils
public InstanceList sampleWithInstanceWeights(java.util.Random r)
{
double[] weights = new double[size()];
for (int i = 0; i < weights.length; i++)
weights[i] = getInstanceWeight(i);
return sampleWithWeights(r, weights);
}
/**
* Returns an <code>InstanceList</code> of the same size, where the instances come from the
* random sampling (with replacement) of this list using the given weights.
	 * The length of the weight array must be the same as the length of this list.
* The new instances all have their weights set to one.
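	 * <p>
	 * For example (an illustrative sketch; <code>ilist</code> stands for an existing InstanceList):
	 * <pre>
	 * double[] weights = new double[ilist.size()];
	 * java.util.Arrays.fill (weights, 1.0);
	 * weights[0] = 5.0;   // make the first instance five times as likely to be drawn
	 * InstanceList resampled = ilist.sampleWithWeights (new java.util.Random(1), weights);
	 * </pre>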
*/
// added by Gary - [email protected]
public InstanceList sampleWithWeights (java.util.Random r, double[] weights)
{
if (weights.length != size())
throw new IllegalArgumentException("length of weight vector must equal number of instances");
if (size() == 0)
return cloneEmpty();
double sumOfWeights = 0;
for (int i = 0; i < size(); i++) {
if (weights[i] < 0)
throw new IllegalArgumentException("weight vector must be non-negative");
sumOfWeights += weights[i];
}
if (sumOfWeights <= 0)
throw new IllegalArgumentException("weights must sum to positive value");
InstanceList newList = new InstanceList(getPipe(), size());
double[] probabilities = new double[size()];
double sumProbs = 0;
for (int i = 0; i < size(); i++) {
sumProbs += r.nextDouble();
probabilities[i] = sumProbs;
}
MatrixOps.timesEquals(probabilities, sumOfWeights / sumProbs);
// make sure rounding didn't mess things up
probabilities[size() - 1] = sumOfWeights;
// do sampling
int a = 0; int b = 0; sumProbs = 0;
while (a < size() && b < size()) {
sumProbs += weights[b];
while (a < size() && probabilities[a] <= sumProbs) {
newList.add(get(b));
newList.setInstanceWeight(a, 1);
a++;
}
b++;
}
return newList;
}
/** Returns the Java Class 'data' field of Instances in this list. */
public Class getDataClass () {
return dataClass;
}
/** Returns the Java Class 'target' field of Instances in this list. */
public Class getTargetClass () {
return targetClass;
}
//added by Fuchun
/** Replaces the <code>Instance</code> at position <code>index</code>
* with a new one. */
public void setInstance (int index, Instance instance)
{
assert (this.getDataAlphabet().equals(instance.getDataAlphabet()));
assert (this.getTargetAlphabet().equals(instance.getTargetAlphabet()));
this.set(index, instance);
}
public double getInstanceWeight (Instance instance) {
if (instWeights != null) {
Double value = instWeights.get(instance);
if (value != null) {
return value;
}
}
return 1.0;
}
public double getInstanceWeight (int index) {
		if (index >= this.size()) {
throw new IllegalArgumentException("Index out of bounds: index="+index+" size="+this.size());
}
if (instWeights != null) {
Double value = instWeights.get(get(index));
if (value != null) {
return value;
}
}
return 1.0;
}
public void setInstanceWeight (int index, double weight) {
setInstanceWeight(get(index), weight);
}
public void setInstanceWeight (Instance instance, double weight) {
// Weights of 1.0 are not explicitly stored in the hash.
if (weight == 1.0) {
// If the weights hash does not exist, we are done.
if (instWeights == null) { return; }
// Otherwise, see if there is a weight currently set.
Double value = instWeights.get(instance);
// If there is no value set or the value is 1.0, we're done.
if (value == null || value.doubleValue() == weight) { return; }
// Otherwise remove the value
instWeights.remove(instance);
}
else {
// Initialize the weights hash if it does not exist
if (instWeights == null) {
instWeights = new HashMap<Instance,Double> ();
}
// Add the new value, overriding any previous value
instWeights.put(instance, weight);
}
}
public void setFeatureSelection (FeatureSelection selectedFeatures)
{
if (selectedFeatures != null
&& selectedFeatures.getAlphabet() != null // xxx We allow a null vocabulary here? See CRF3.java
&& selectedFeatures.getAlphabet() != getDataAlphabet())
throw new IllegalArgumentException ("Vocabularies do not match");
featureSelection = selectedFeatures;
}
public FeatureSelection getFeatureSelection ()
{
return featureSelection;
}
public void setPerLabelFeatureSelection (FeatureSelection[] selectedFeatures)
{
if (selectedFeatures != null) {
for (int i = 0; i < selectedFeatures.length; i++)
if (selectedFeatures[i].getAlphabet() != getDataAlphabet())
throw new IllegalArgumentException ("Vocabularies do not match");
}
perLabelFeatureSelection = selectedFeatures;
}
public FeatureSelection[] getPerLabelFeatureSelection ()
{
return perLabelFeatureSelection;
}
/** Sets the "target" field to <code>null</code> in all instances. This makes unlabeled data. */
public void removeTargets()
{
for (Instance instance : this)
instance.setTarget (null);
}
/** Sets the "source" field to <code>null</code> in all instances. This will often save memory when
the raw data had been placed in that field. */
public void removeSources()
{
for (int i = 0; i < this.size(); i++)
get(i).clearSource();
}
/** Constructs a new <code>InstanceList</code>, deserialized from <code>file</code>. If the
	    string value of <code>file</code> is "-", then deserialize from {@link System#in}. */
public static InstanceList load (File file)
{
try {
ObjectInputStream ois;
if (file.toString().equals("-"))
ois = new ObjectInputStream (System.in);
else
ois = new ObjectInputStream (new BufferedInputStream(new FileInputStream (file)));
InstanceList ilist = (InstanceList) ois.readObject();
ois.close();
return ilist;
} catch (Exception e) {
e.printStackTrace();
throw new IllegalArgumentException ("Couldn't read InstanceList from file "+file);
}
}
/** Saves this <code>InstanceList</code> to <code>file</code>.
If the string value of <code>file</code> is "-", then
	 serialize to {@link System#out}. */
public void save (File file)
{
try {
ObjectOutputStream ois;
if (file.toString().equals("-"))
ois = new ObjectOutputStream (System.out);
else
ois = new ObjectOutputStream (new FileOutputStream (file));
ois.writeObject(this);
ois.close();
} catch (Exception e) {
e.printStackTrace();
throw new IllegalArgumentException ("Couldn't save InstanceList to file "+file);
}
}
// Serialization of InstanceList
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException {
int i, size;
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject(instWeights);
out.writeObject(pipe);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int i, size;
int version = in.readInt ();
instWeights = (HashMap<Instance,Double>) in.readObject();
pipe = (Pipe) in.readObject();
}
// added - [email protected]
/**
<code>CrossValidationIterator</code> allows iterating over pairs of
<code>InstanceList</code>, where each pair is split into training/testing
based on nfolds.
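	 <p>
	 For example (an illustrative sketch; <code>ilist</code> stands for an existing InstanceList):
	 <pre>
	 InstanceList.CrossValidationIterator cvIter = ilist.crossValidationIterator (10);
	 while (cvIter.hasNext()) {
	     InstanceList[] trainTest = cvIter.next();
	     // trainTest[0] is the larger training split; trainTest[1] is the held-out fold
	 }
	 </pre>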
*/
public class CrossValidationIterator implements java.util.Iterator<InstanceList[]>, Serializable
{
int nfolds;
InstanceList[] folds;
int index;
/**
@param _nfolds number of folds to split InstanceList into
@param seed seed for random number used to split InstanceList
*/
public CrossValidationIterator (int _nfolds, int seed)
{
assert (_nfolds > 0) : "nfolds: " + nfolds;
this.nfolds = _nfolds;
this.index = 0;
folds = new InstanceList[_nfolds];
double fraction = (double) 1 / _nfolds;
double[] proportions = new double[_nfolds];
for (int i=0; i < _nfolds; i++)
proportions[i] = fraction;
folds = split (new java.util.Random (seed), proportions);
}
public CrossValidationIterator (int _nfolds) {
this (_nfolds, 1);
}
public boolean hasNext () { return index < nfolds; }
/**
* Returns the next training/testing split.
* @return A pair of lists, where <code>InstanceList[0]</code> is the larger split (training)
* and <code>InstanceList[1]</code> is the smaller split (testing)
*/
public InstanceList[] nextSplit () {
InstanceList[] ret = new InstanceList[2];
ret[0] = new InstanceList (pipe);
for (int i=0; i < folds.length; i++) {
if (i==index)
continue;
Iterator<Instance> iter = folds[i].iterator();
while (iter.hasNext())
ret[0].add (iter.next());
}
ret[1] = folds[index].shallowClone();
index++;
return ret;
}
/** Returns the next split, given the number of folds you want in
* the training data. */
public InstanceList[] nextSplit (int numTrainFolds) {
InstanceList[] ret = new InstanceList[2];
ret[0] = new InstanceList (pipe);
ret[1] = new InstanceList (pipe);
// train on folds [index, index+numTrainFolds), test on rest
for (int i = 0; i < folds.length; i++) {
int foldno = (index + i) % folds.length;
InstanceList addTo;
if (i < numTrainFolds) {
addTo = ret[0];
} else {
addTo = ret[1];
}
Iterator<Instance> iter = folds[foldno].iterator();
while (iter.hasNext())
addTo.add (iter.next());
}
index++;
return ret;
}
public InstanceList[] next () { return nextSplit(); }
public void remove () { throw new UnsupportedOperationException(); }
}
/** Returns the pipe through which each added <code>Instance</code> is passed,
* which may be <code>null</code>. */
public Pipe getPipe ()
{
return pipe;
}
/** Change the default Pipe associated with InstanceList.
* This method is very dangerous and should only be used in extreme circumstances!! */
public void setPipe(Pipe p) {
assert (Alphabet.alphabetsMatch(this, p));
pipe = p;
}
/** Returns the <code>Alphabet</code> mapping features of the data to
* integers. */
public Alphabet getDataAlphabet ()
{
if (dataAlphabet == null && pipe != null) {
dataAlphabet = pipe.getDataAlphabet ();
}
assert (pipe == null
|| pipe.getDataAlphabet () == null
|| pipe.getDataAlphabet () == dataAlphabet);
return dataAlphabet;
}
/** Returns the <code>Alphabet</code> mapping target output labels to
* integers. */
public Alphabet getTargetAlphabet ()
{
if (targetAlphabet == null && pipe != null) {
targetAlphabet = pipe.getTargetAlphabet ();
}
assert (pipe == null
|| pipe.getTargetAlphabet () == null
|| pipe.getTargetAlphabet () == targetAlphabet);
return targetAlphabet;
}
public Alphabet getAlphabet () {
return getDataAlphabet();
}
public Alphabet[] getAlphabets () {
return new Alphabet[] {getDataAlphabet(), getTargetAlphabet() };
}
public LabelVector targetLabelDistribution ()
{
if (this.size() == 0) return null;
if (!(get(0).getTarget() instanceof Labeling))
throw new IllegalStateException ("Target is not a labeling.");
double[] counts = new double[getTargetAlphabet().size()];
for (int i = 0; i < this.size(); i++) {
Instance instance = get(i);
Labeling l = (Labeling) instance.getTarget();
l.addTo (counts, getInstanceWeight(i));
}
return new LabelVector ((LabelAlphabet)getTargetAlphabet(), counts);
}
public CrossValidationIterator crossValidationIterator (int nfolds, int seed)
{
return new CrossValidationIterator(nfolds, seed);
}
public CrossValidationIterator crossValidationIterator (int nfolds)
{
return new CrossValidationIterator(nfolds);
}
public static final String TARGET_PROPERTY = "target";
// I'm not sure these methods best belong here. On the other hand it is easy to find and centrally located here. -AKM Jan 2006
public void hideSomeLabels (double proportionToHide, Randoms r)
{
for (int i = 0; i < this.size(); i++) {
if (r.nextBoolean(proportionToHide)) {
Instance instance = this.get(i);
instance.unLock();
if (instance.getProperty(TARGET_PROPERTY) != instance.getTarget())
instance.setProperty(TARGET_PROPERTY, instance.getTarget());
instance.setTarget (null);
instance.lock();
}
}
}
public void hideSomeLabels (BitSet bs)
{
for (int i = 0; i < this.size(); i++) {
if (bs.get(i)) {
Instance instance = this.get(i);
instance.unLock();
if (instance.getProperty(TARGET_PROPERTY) != instance.getTarget())
instance.setProperty(TARGET_PROPERTY, instance.getTarget());
instance.setTarget (null);
instance.lock();
}
}
}
public void unhideAllLabels ()
{
for (int i = 0; i < this.size(); i++) {
Instance instance = this.get(i);
Object t;
if (instance.getTarget() == null && (t=instance.getProperty(TARGET_PROPERTY)) != null) {
instance.unLock();
instance.setTarget(t);
instance.lock();
}
}
}
}
| 34,294 | 31.881112 | 142 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/TokenSequence.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import cc.mallet.util.PropertyList;
/**
 * A representation of a piece of text as a sequence of {@link Token}s, to which we can attach properties.
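 * <p>
 * For example (an illustrative sketch; <code>dict</code> stands for an existing Alphabet):
 * <pre>
 * TokenSequence ts = new TokenSequence (new Object[] {"the", "quick", "brown", "fox"});
 * FeatureSequence fs = ts.toFeatureSequence (dict);
 * </pre>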
*/
public class TokenSequence extends ArrayList<Token> implements Sequence, Serializable {
//ArrayList tokens;
PropertyList properties = null; // for arbitrary properties
public TokenSequence (Collection<Token> tokens) {
super(tokens);
}
public TokenSequence () {
super();
}
public TokenSequence (int capacity) {
super (capacity);
}
public TokenSequence (Token[] tokens) {
this (tokens.length);
for (int i = 0; i < tokens.length; i++)
this.add( tokens[i] );
}
public TokenSequence (Object[] tokens) {
this( tokens.length );
for (int i = 0; i < tokens.length; i++)
this.add (new Token( tokens[i].toString()));
}
//public Token get (int i) {return this.get(i); }
public String toString () {
StringBuffer sb = new StringBuffer();
sb.append( "TokenSequence " + super.toString() + "\n" );
for (int i = 0; i < this.size(); i++) {
String tt = get(i).toString();
sb.append( "Token#" + i + ":" );
sb.append( tt );
if (!tt.endsWith( "\n" ))
sb.append( "\n" );
}
return sb.toString();
}
public String toStringShort () {
StringBuffer sb = new StringBuffer();
for (int i = 0; i < this.size(); i++) {
String tt = get(i).toString();
tt.replaceAll("\n","");
if (i > 0){
sb.append(" ");
}
sb.append(tt);
}
return sb.toString();
}
// gdruck
// This method causes a compiler error in Eclipse Helios.
// Removed support for adding Objects other than String.
/*
public void add (Object o) {
if (o instanceof Token)
add( (Token)o );
else if (o instanceof TokenSequence)
add( (TokenSequence)o );
else
add( new Token( o.toString() ) );
}
*/
public void add(String string) {
add(new Token(string));
}
// added by Fuchun Peng, Oct. 24, 2003
public Object removeLast () {
if (this.size() > 0)
return this.remove (this.size() - 1);
else
return null;
}
public void addAll (Object[] objects) {
for (int i = 0; i < objects.length; i++) {
if (objects[i] instanceof Token)
add( (Token)objects[i] );
else
add( new Token( objects[i].toString() ) );
}
}
public FeatureSequence toFeatureSequence (Alphabet dict) {
FeatureSequence fs = new FeatureSequence( dict, this.size() );
for (int i = 0; i < this.size(); i++)
fs.add (dict.lookupIndex( (this.get(i)).getText()));
return fs;
}
public FeatureVector toFeatureVector (Alphabet dict) {
return new FeatureVector( toFeatureSequence( dict ) );
}
public void setNumericProperty (String key, double value) {
properties = PropertyList.add( key, value, properties );
}
public void setProperty (String key, Object value) {
properties = PropertyList.add( key, value, properties );
}
public double getNumericProperty (String key) {
return properties.lookupNumber( key );
}
public Object getProperty (String key) {
return properties.lookupObject( key );
}
public boolean hasProperty (String key) {
return properties.hasProperty( key );
}
// added gmann 8/30/2006
public PropertyList getProperties () {
return properties;
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject(ObjectOutputStream out) throws IOException {
out.writeInt( CURRENT_SERIAL_VERSION );
out.defaultWriteObject();
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
@SuppressWarnings("unused")
int version = in.readInt();
in.defaultReadObject();
}
}
| 4,368 | 23.544944 | 97 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Multinomial.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types;
import java.io.Serializable;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.FeatureVector;
import cc.mallet.util.Randoms;
/**
* A probability distribution over a set of features represented as a {@link cc.mallet.types.FeatureVector}.
 * The values associated with each element in the Multinomial/FeatureVector are probabilities
* and should sum to 1.
* Features are indexed using feature indices - the index into the underlying Alphabet -
* rather than using locations the way FeatureVectors do.
* <p>
 * {@link cc.mallet.types.Multinomial.Estimator} provides a subhierarchy
* of ways to generate an estimate of the probability distribution from counts associated
* with the features.
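 * <p>
 * For example (an illustrative sketch; <code>dict</code> stands for an existing feature Alphabet):
 * <pre>
 * Multinomial.Estimator estimator = new Multinomial.LaplaceEstimator (dict);
 * estimator.increment ("apple", 2.0);
 * estimator.increment ("orange", 1.0);
 * Multinomial m = estimator.estimate ();
 * double pApple = m.probability ("apple");
 * </pre>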
*
* @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
public class Multinomial extends FeatureVector
{
// protected Multinomial () { }
// "size" is the number of entries in "probabilities" that have valid values in them;
// note that the dictionary (and thus the resulting multinomial) may be bigger than size
// if the dictionary is shared with multiple estimators, and the dictionary grew
// due to another estimator.
private static double[] getValues (double[] probabilities, Alphabet dictionary,
int size, boolean copy, boolean checkSum)
{
double[] values;
assert (dictionary == null || dictionary.size() >= size);
// No, not necessarily true; see comment above.
//assert (dictionary == null || dictionary.size() == size);
//assert (probabilities.length == size);
// xxx Consider always copying, so that we are assured that we
// always have a real probability distribution.
if (copy) {
values = new double[dictionary==null ? size : dictionary.size()];
System.arraycopy (probabilities, 0, values, 0, size);
} else {
assert (dictionary == null || dictionary.size() == probabilities.length);
values = probabilities;
}
if (checkSum) {
// Check that we have a true probability distribution
double sum = 0;
for (int i = 0; i < values.length; i++)
sum += values[i];
			if (Math.abs (sum - 1.0) > 0.0001) { // allow a small tolerance for floating-point rounding
throw new IllegalArgumentException ("Probabilities sum to " + sum + ", not to one.");
}
}
return values;
}
protected Multinomial (double[] probabilities, Alphabet dictionary,
int size, boolean copy, boolean checkSum)
{
super (dictionary, getValues(probabilities, dictionary, size, copy, checkSum));
}
public Multinomial (double[] probabilities, Alphabet dictionary)
{
this (probabilities, dictionary, dictionary.size(), true, true);
}
public Multinomial (double[] probabilities, int size)
{
this (probabilities, null, size, true, true);
}
public Multinomial (double[] probabilities)
{
this (probabilities, null, probabilities.length, true, true);
}
public int size ()
{
return values.length;
}
public double probability (int featureIndex)
{
return values[featureIndex];
}
public double probability (Object key)
{
if (dictionary == null)
throw new IllegalStateException ("This Multinomial has no dictionary.");
return probability (dictionary.lookupIndex (key));
}
public double logProbability (int featureIndex)
{
return Math.log(values[featureIndex]);
}
public double logProbability (Object key)
{
if (dictionary == null)
throw new IllegalStateException ("This Multinomial has no dictionary.");
return logProbability (dictionary.lookupIndex (key));
}
public Alphabet getAlphabet ()
{
return dictionary;
}
public void addProbabilitiesTo (double[] vector)
{
for (int i = 0; i < values.length; i++)
vector[i] += values[i];
}
public int randomIndex (Randoms r)
{
double f = r.nextUniform();
double sum = 0;
int i;
for (i = 0; i < values.length; i++) {
sum += values[i];
//System.out.print (" sum="+sum);
if (sum >= f)
break;
}
//if (sum < f) throw new IllegalStateException
//System.out.println ("i = "+i+", f = "+f+", sum = "+sum);
assert (sum >= f);
return i;
}
public Object randomObject (Randoms r)
{
if (dictionary == null)
throw new IllegalStateException ("This Multinomial has no dictionary.");
return dictionary.lookupObject (randomIndex (r));
}
public FeatureSequence randomFeatureSequence (Randoms r, int length)
{
if (! (dictionary instanceof Alphabet))
throw new UnsupportedOperationException
("Multinomial's dictionary must be a Alphabet");
FeatureSequence fs = new FeatureSequence ((Alphabet)dictionary, length);
while (length-- > 0)
fs.add (randomIndex (r));
return fs;
}
// "size" is the number of 1.0-weight features in the feature vector
public FeatureVector randomFeatureVector (Randoms r, int size)
{
return new FeatureVector (randomFeatureSequence (r, size));
}
/** A Multinomial in which the values associated with each feature index fi is
* Math.log(probability[fi]) instead of probability[fi].
* Logs are used for numerical stability.
*/
public static class Logged extends Multinomial
{
private static final long serialVersionUID = 1L;
public Logged (double[] probabilities, Alphabet dictionary,
int size, boolean areLoggedAlready)
{
super (probabilities, dictionary, size, true, !areLoggedAlready);
assert (dictionary == null || dictionary.size() == size);
if (!areLoggedAlready)
for (int i = 0; i < size; i++)
values[i] = Math.log (values[i]);
}
public Logged (double[] probabilities, Alphabet dictionary,
boolean areLoggedAlready)
{
this (probabilities, dictionary,
(dictionary == null ? probabilities.length : dictionary.size()),
areLoggedAlready);
}
public Logged (double[] probabilities, Alphabet dictionary, int size)
{
this (probabilities, dictionary, size, false);
}
public Logged (double[] probabilities, Alphabet dictionary)
{
this (probabilities, dictionary, dictionary.size(), false);
}
public Logged (Multinomial m)
{
this (m.values, m.dictionary, false);
}
public Logged (double[] probabilities)
{
this (probabilities, null, false);
}
public double probability (int featureIndex)
{
return Math.exp (values[featureIndex]);
}
public double logProbability (int featureIndex)
{
return values[featureIndex];
}
public void addProbabilities (double[] vector)
{
assert (vector.length == values.length);
for (int fi = 0; fi < vector.length; fi++)
vector[fi] += Math.exp(values[fi]);
}
public void addLogProbabilities (double[] vector)
{
for (int i = 0; i < values.length; i++)
vector[i] += values[i];
// if vector is longer than values, act as if values
// were extended with values of minus infinity.
for (int i=values.length; i<vector.length; i++){
vector[i] = Double.NEGATIVE_INFINITY;
}
}
}
// Serialization
private static final long serialVersionUID = 1L;
// xxx Make this inherit from something like AugmentableDenseFeatureVector
/**
* A hierarchy of classes used to produce estimates of probabilities, in
* the form of a Multinomial, from counts associated with the elements
* of an Alphabet.
*
* Estimator itself contains the machinery for associating and manipulating
* counts with elements of an Alphabet, including behaving sanely if the
* Alphabet changes size between calls. It does not contain any means
* of generating probability estimates; various means of estimating are
* provided by subclasses.
*/
public static abstract class Estimator implements Cloneable, Serializable
{
Alphabet dictionary;
double counts[];
int size; // The number of valid entries in counts[]
static final int minCapacity = 16;
protected Estimator (double counts[], int size, Alphabet dictionary)
{
this.counts = counts;
this.size = size;
this.dictionary = dictionary;
}
public Estimator (double counts[], Alphabet dictionary)
{
this (counts, dictionary.size(), dictionary);
}
public Estimator ()
{
this (new double[minCapacity], 0, null);
}
public Estimator (int size)
{
this (new double[size > minCapacity ? size : minCapacity], size, null);
}
public Estimator (Alphabet dictionary)
{
this(new double[dictionary.size()], dictionary.size(), dictionary);
}
public void setAlphabet (Alphabet d)
{
this.size = d.size();
this.counts = new double[size];
this.dictionary = d;
}
public int size ()
{
return (dictionary == null ? size : dictionary.size());
}
protected void ensureCapacity (int index)
{
//assert (dictionary == null); // Size is fixed if dictionary present?
if (index > size)
size = index;
if (counts.length <= index) {
int newLength = ((counts.length < minCapacity)
? minCapacity
: counts.length);
while (newLength <= index)
newLength *= 2;
double[] newCounts = new double[newLength];
System.arraycopy (counts, 0, newCounts, 0, counts.length);
this.counts = newCounts;
}
}
// xxx Note that this does not reset the "size"!
public void reset ()
{
for (int i = 0; i < counts.length; i++)
counts[i] = 0;
}
// xxx Remove this method?
private void setCounts (double counts[])
{
assert (dictionary == null || counts.length <= size());
// xxx Copy instead?
// xxx Set size() to match counts.length?
this.counts = counts;
}
public void increment (int index, double count)
{
ensureCapacity (index);
counts[index] += count;
if (size < index + 1)
size = index + 1;
}
public void increment (String key, double count)
{
increment (dictionary.lookupIndex (key), count);
}
// xxx Add "public void increment (Object key, double count)", or is it too dangerous?
public void increment (FeatureSequence fs, double scale)
{
if (fs.getAlphabet() != dictionary)
throw new IllegalArgumentException ("Vocabularies don't match.");
for (int fsi = 0; fsi < fs.size(); fsi++)
increment (fs.getIndexAtPosition(fsi), scale);
}
public void increment (FeatureSequence fs)
{
increment (fs, 1.0);
}
public void increment (FeatureVector fv, double scale)
{
if (fv.getAlphabet() != dictionary)
throw new IllegalArgumentException ("Vocabularies don't match.");
for (int fvi = 0; fvi < fv.numLocations(); fvi++)
// Originally, the value of the feature was not being taken into account here,
// so words were only counted once per document! - gdruck
// increment (fv.indexAtLocation(fvi), scale);
increment(fv.indexAtLocation(fvi), scale * fv.valueAtLocation(fvi));
}
public void increment (FeatureVector fv)
{
increment (fv, 1.0);
}
public double getCount (int index)
{
return counts[index];
}
public Object clone ()
{
try {
return super.clone ();
} catch (CloneNotSupportedException e) {
return null;
}
}
public void print () {
//if (counts != null) throw new IllegalStateException ("Foo");
System.out.println ("Multinomial.Estimator");
for (int i = 0; i < size; i++)
System.out.println ("counts["+i+"] = " + counts[i]);
}
public abstract Multinomial estimate ();
// Serialization
// serialVersionUID is overriden to prevent innocuous changes in this
// class from making the serialization mechanism think the external
// format has changed.
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException
{
out.writeInt(CURRENT_SERIAL_VERSION);
out.writeObject(dictionary);
out.writeObject(counts);
out.writeInt(size);
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException
{
int version = in.readInt();
if (version != CURRENT_SERIAL_VERSION)
throw new ClassNotFoundException("Mismatched Multionmial.Estimator versions: wanted " +
CURRENT_SERIAL_VERSION + ", got " +
version);
dictionary = (Alphabet) in.readObject();
counts = (double []) in.readObject();
size = in.readInt();
}
} // class Estimator
/**
* An Estimator in which probability estimates in a Multinomial
* are generated by adding a constant m (specified at construction time)
* to each count before dividing by the total of the m-biased counts.
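	 * <p>
	 * That is, each estimate is (count[i] + m) / (totalCount + m * numEntries),
	 * where totalCount is the sum of all counts and numEntries is the number of entries being estimated.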
*/
public static class MEstimator extends Estimator
{
double m;
public MEstimator (Alphabet dictionary, double m)
{
super (dictionary);
this.m = m;
}
public MEstimator (int size, double m)
{
super(size);
this.m = m;
}
public MEstimator (double m)
{
super();
this.m = m;
}
public Multinomial estimate ()
{
double[] pr = new double[dictionary==null ? size : dictionary.size()];
if (dictionary != null){
ensureCapacity(dictionary.size() -1 ); //side effect: updates size member
}
double sum = 0;
for (int i = 0; i < pr.length; i++) {
//if (dictionary != null) System.out.println (dictionary.lookupObject(i).toString()+' '+counts[i]);
pr[i] = counts[i] + m;
sum += pr[i];
}
for (int i = 0; i < pr.length; i++)
pr[i] /= sum;
return new Multinomial (pr, dictionary, size, false, false);
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException
{
out.writeInt(CURRENT_SERIAL_VERSION);
out.writeDouble(m);
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException
{
int version = in.readInt();
if (version != CURRENT_SERIAL_VERSION)
throw new ClassNotFoundException("Mismatched Multinomial.MEstimator versions: wanted " +
CURRENT_SERIAL_VERSION + ", got " +
version);
m = in.readDouble();
}
} // end MEstimator
/**
* An MEstimator with m set to 0. The probability estimates in the Multinomial
* are generated by dividing each count by the sum of all counts.
*/
public static class MLEstimator extends MEstimator
{
public MLEstimator ()
{
super (0);
}
public MLEstimator (int size)
{
super (size, 0);
}
public MLEstimator (Alphabet dictionary)
{
super (dictionary, 0);
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException
{
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException
{
int version = in.readInt();
if (version != CURRENT_SERIAL_VERSION)
throw new ClassNotFoundException("Mismatched Multinomial.MLEstimator versions: wanted " +
CURRENT_SERIAL_VERSION + ", got " +
version);
}
} // class MLEstimator
/**
* An MEstimator with m set to 1. The probability estimates in the Multinomial
* are generated by adding 1 to each count and then dividing each
* 1-biased count by the sum of all 1-biased counts.
*/
public static class LaplaceEstimator extends MEstimator
{
public LaplaceEstimator ()
{
super (1);
}
public LaplaceEstimator (int size)
{
super (size, 1);
}
public LaplaceEstimator (Alphabet dictionary)
{
super (dictionary, 1);
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException
{
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException
{
int version = in.readInt();
if (version != CURRENT_SERIAL_VERSION)
throw new ClassNotFoundException("Mismatched Multinomial.LaplaceEstimator versions: wanted " +
CURRENT_SERIAL_VERSION + ", got " +
version);
}
} // class Multinomial.LaplaceEstimator
//todo: Lazy, lazy lazy. Make this serializable, too.
/**
	 * Unimplemented; use one of the MEstimator subclasses, which are implemented.
*/
public static class MAPEstimator extends Estimator
{
Dirichlet prior;
public MAPEstimator (Dirichlet d)
{
super (d.size());
prior = d;
}
public Multinomial estimate ()
{
// xxx unfinished.
return null;
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException
{
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException
{
int version = in.readInt();
if (version != CURRENT_SERIAL_VERSION)
throw new ClassNotFoundException("Mismatched Multinomial.MAPEstimator versions: wanted " +
CURRENT_SERIAL_VERSION + ", got " +
version);
}
}
}
| 18,337 | 27.788069 | 108 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/DenseVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.io.*;
public class DenseVector extends DenseMatrix implements Vector, Serializable
{
public DenseVector (double[] values, boolean copy)
{
if (copy) {
this.values = new double[values.length];
System.arraycopy (values, 0, this.values, 0, values.length);
} else
this.values = values;
}
public DenseVector (double[] values) { this (values, true); }
public DenseVector (int size) { this (new double[size], false); }
public int getNumDimensions () { return 1; }
public int getDimensions (int[] sizes) { sizes[0] = values.length; return 1; }
public double value (int[] indices) {
assert (indices.length == 1);
return values[indices[0]];
}
public double value (int index) {
return values[index];
}
public void setValue (int[] indices, double value) {
assert (indices.length == 1);
values[indices[0]] = value;
}
public void setValue (int index, double value) {
values[index] = value;
}
public void columnPlusEquals (int columnIndex, double value)
{
values[columnIndex] += value;
}
public ConstantMatrix cloneMatrix () {
return new DenseVector (values, true);
}
public int singleIndex (int[] indices) { assert (indices.length == 1); return indices[0]; }
public void singleToIndices (int i, int[] indices) { indices[0] = i; }
// Copy the contents of Matrix m into this Vector starting at index
// i in this Vector, laying out Matrix m in "getSingle()" order.
// Return the next index that could be set in this DenseVector after
// the indices filled by Matrix m.
public final int arrayCopyFrom (int i, Matrix m) {
if (m instanceof DenseVector) {
System.arraycopy (((DenseVector)m).values, 0, values, i, ((DenseVector)m).values.length);
return i + ((DenseVector)m).values.length;
} else if (m instanceof Matrix2) {
((Matrix2)m).arrayCopyInto (values, i);
return i + m.singleSize();
} else {
for (int j = 0; j < m.singleSize(); j++)
values[i++] = m.singleValue (j);
return i;
}
}
/** Copy values from an array into this vector. The array should have the
* same size as the vector */
public final void arrayCopyFrom( double[] a )
{
arrayCopyFrom(a,0);
}
/** Copy values from an array starting at a particular index into this
* vector. The array must have at least as many values beyond the starting
* index as there are in the vector.
*
* @return Next uncopied index in the array.
*/
public final int arrayCopyFrom( double [] a , int startingArrayIndex )
{
System.arraycopy( a, startingArrayIndex, values, 0, values.length );
return startingArrayIndex + values.length;
}
// Copy the contents of this Vector into Matrix m starting at index
// i in this Vector, setting values in Matrix m in "setSingle()" order.
// Return the next index that could be gotten after the indices copied
// into Matrix m.
public final int arrayCopyTo (int i, Matrix m) {
if (m instanceof DenseVector) {
System.arraycopy (values, i, ((DenseVector)m).values, 0, ((DenseVector)m).values.length);
return i + ((DenseVector)m).values.length;
} else if (m instanceof Matrix2) {
((Matrix2)m).arrayCopyFrom (values, i);
return i + m.singleSize();
} else {
for (int j = 0; j < m.singleSize(); j++)
m.setSingleValue (j, values[i++]);
return i;
}
}
public final int arrayCopyTo (int i, double[] a) {
System.arraycopy (values, i, a, 0, a.length);
return i + a.length;
}
/** Copy the contents of this vector into an array starting at a particular
* index.
*
* @return Next available index in the array
*/
public final int arrayCopyInto (double[] array, int startingArrayIndex)
{
System.arraycopy (values, 0, array, startingArrayIndex, values.length);
return startingArrayIndex + values.length;
}
public void addTo (double[] v)
{
assert (v.length == values.length);
for (int i = 0; i < values.length; i++)
v[i] += values[i];
}
public void addTo (double[] v, double factor)
{
assert (v.length == values.length);
for (int i = 0; i < values.length; i++)
v[i] += values[i] * factor;
}
public static double sum (double[] v)
{
double sum = 0;
for (int i = 0; i < v.length; i++)
sum += v[i];
return sum;
}
public static double normalize (double[] v)
{
double sum = 0;
for (int i = 0; i < v.length; i++)
sum += v[i];
assert (sum != 0);
for (int i = 0; i < v.length; i++)
v[i] /= sum;
return sum;
}
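	// For example, normalize(new double[]{1.0, 3.0}) rescales the array in place
	// to {0.25, 0.75} and returns the original sum, 4.0.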
public static double max (double[] v)
{
double max = Double.NEGATIVE_INFINITY;
for (int i = 0; i < v.length; i++)
if (v[i] > max)
max = v[i];
return max;
}
public static void print (double[] v)
{
System.out.print ("[");
for (int i = 0; i < v.length; i++)
System.out.print (" " + v[i]);
System.out.println ("]");
}
public static void print (int[] v)
{
System.out.print ("[");
for (int i = 0; i < v.length; i++)
System.out.print (" " + v[i]);
System.out.println ("]");
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
}
}
| 5,933 | 26.472222 | 92 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/CachedMetric.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/** Interface for a measure of distance between two <CODE>ConstantVector</CODE>s
@author Jerod Weinman <A HREF="mailto:[email protected]">[email protected]</A>
*/
package cc.mallet.types;
import cc.mallet.types.SparseVector;
/**
	 A Metric whose distance method also accepts a hash code for each vector,
	 allowing implementations to cache per-vector computations across repeated calls.
*/
public interface CachedMetric extends Metric {
public double distance( SparseVector a, int hashCodeA,
SparseVector b, int hashCodeB);
}
| 893 | 30.928571 | 88 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/PerLabelInfoGain.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
public class PerLabelInfoGain
{
final static float log2 = (float)Math.log(2);
static boolean binary = true;
static boolean print = false;
InfoGain[] ig;
public PerLabelInfoGain (InstanceList ilist)
{
double[][] pcig = calcPerLabelInfoGains (ilist);
Alphabet v = ilist.getDataAlphabet();
int numClasses = ilist.getTargetAlphabet().size();
ig = new InfoGain[numClasses];
for (int i = 0; i < numClasses; i++)
ig[i] = new InfoGain (v, pcig[i]);
}
public InfoGain getInfoGain (int classIndex)
{
return ig[classIndex];
}
public int getNumClasses ()
{
return ig.length;
}
	private static double entropy (double pc, double pnc)
	{
		assert (Math.abs((pc+pnc)-1) < 0.0001) : "pc="+pc+" pnc="+pnc;
		if (pc == 0 || pnc == 0)
			return 0;
		else {
			double ret = - pc*Math.log(pc)/log2 - pnc*Math.log(pnc)/log2;
			assert (ret >= 0) : "pc="+pc+" pnc="+pnc;
			return ret;
		}
	}
public static double[][] calcPerLabelInfoGains (InstanceList ilist)
{
assert (binary);
double[][] classFeatureCounts;
int[] featureCounts;
int[] classCounts;
int numClasses = ilist.getTargetAlphabet().size();
int numFeatures = ilist.getDataAlphabet().size();
int numInstances = ilist.size();
// Fill in the classFeatureCounts
classFeatureCounts = new double[numClasses][numFeatures];
featureCounts = new int[numFeatures];
classCounts = new int[numClasses];
/*
for (int fi = 0; fi < numFeatures; fi++)
featureCounts[fi] = 0;
for (int ci = 0; ci < numClasses; ci++) {
classCounts[ci] = 0;
for (int fi = 0; fi < numFeatures; fi++)
classFeatureCounts[ci][fi] = 0;
}
*/
for (int i = 0; i < ilist.size(); i++) {
Instance instance = ilist.get(i);
FeatureVector fv = (FeatureVector) instance.getData();
// xxx Note that this ignores uncertainly-labeled instances!
int classIndex = instance.getLabeling().getBestIndex();
classCounts[classIndex]++;
for (int fvi = 0; fvi < fv.numLocations(); fvi++) {
int featureIndex = fv.indexAtLocation(fvi);
classFeatureCounts[classIndex][featureIndex]++;
featureCounts[featureIndex]++;
//System.out.println ("fi="+featureIndex+" ni="+numInstances+" fc="+featureCounts[featureIndex]+" i="+i);
assert (featureCounts[featureIndex] <= numInstances)
: "fi="+featureIndex+"ni="+numInstances+" fc="+featureCounts[featureIndex]+" i="+i;
}
}
Alphabet v = ilist.getDataAlphabet();
if (print)
for (int ci = 0; ci < numClasses; ci++)
System.out.println (ilist.getTargetAlphabet().lookupObject(ci).toString()+"="+ci);
		// Let C_i be a random variable on {c_i, !c_i}
		// per-class entropy of feature f_j = H(C_i|f_j)
		// H(C_i|f_j) = - P(c_i|f_j) log(P(c_i|f_j)) - P(!c_i|f_j) log(P(!c_i|f_j))
		// First calculate the per-class entropy, not conditioned on any feature,
		// and store it in classEntropies[]
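		// The per-class information gain computed further below is then
		//   IG(C_i, f_j) = H(C_i) - [ P(f_j) H(C_i|f_j) + P(!f_j) H(C_i|!f_j) ]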
double[] classEntropies = new double[numClasses];
for (int ci = 0; ci < numClasses; ci++) {
double pc, pnc;
pc = ((double)classCounts[ci])/numInstances;
pnc = ((double)numInstances-classCounts[ci])/numInstances;
classEntropies[ci] = entropy (pc, pnc);
}
// Calculate per-class infogain of each feature, and store it in classFeatureCounts[]
for (int fi = 0; fi < numFeatures; fi++) {
double pf = ((double)featureCounts[fi])/numInstances;
double pnf = ((double)numInstances-featureCounts[fi])/numInstances;
assert (pf >= 0);
assert (pnf >= 0);
if (print && fi < 10000) {
System.out.print (v.lookupObject(fi).toString());
for (int ci = 0; ci < numClasses; ci++) {
System.out.print (" "+classFeatureCounts[ci][fi]);
}
System.out.println ("");
}
//assert (sum == featureCounts[fi]);
for (int ci = 0; ci < numClasses; ci++) {
if (featureCounts[fi] == 0) {
classFeatureCounts[ci][fi] = 0;
continue;
}
double pc, pnc, ef;
// Calculate the {ci,!ci}-entropy given that the feature does occur
pc = ((double)classFeatureCounts[ci][fi]) / featureCounts[fi];
pnc = ((double)featureCounts[fi]-classFeatureCounts[ci][fi]) / featureCounts[fi];
ef = entropy (pc, pnc);
// Calculate the {ci,!ci}-entropy given that the feature does not occur
pc = ((double)classCounts[ci]-classFeatureCounts[ci][fi]) / (numInstances-featureCounts[fi]);
pnc = ((double)(numInstances-featureCounts[fi])-(classCounts[ci]-classFeatureCounts[ci][fi])) / (numInstances-featureCounts[fi]);
double enf = entropy(pc, pnc);
classFeatureCounts[ci][fi] = classEntropies[ci] - (pf*ef + pnf*enf);
if (print && fi < 10000)
System.out.println ("pf="+pf+" ef="+ef+" pnf="+pnf+" enf="+enf+" e="+classEntropies[ci]+" cig="+classFeatureCounts[ci][fi]);
}
}
// Print selected features
if (print) {
for (int fi = 0; fi < 100; fi++) {
String featureName = v.lookupObject(fi).toString();
for (int ci = 0; ci < numClasses; ci++) {
String className = ilist.getTargetAlphabet().lookupObject(ci).toString();
if (classFeatureCounts[ci][fi] > .1) {
System.out.println (featureName+','+className+'='+classFeatureCounts[ci][fi]);
}
}
}
}
return classFeatureCounts;
}
public static class Factory implements RankedFeatureVector.PerLabelFactory
{
public Factory ()
{
}
public RankedFeatureVector[] newRankedFeatureVectors (InstanceList ilist)
{
PerLabelInfoGain x = new PerLabelInfoGain (ilist);
return x.ig;
}
}
}
| 5,964 | 32.138889 | 133 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/FeatureVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Arrays;
import java.util.logging.*;
import java.io.*;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.Vector;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.PropertyList;
/**
* A subset of an {@link cc.mallet.types.Alphabet} in which each element of the subset has an associated value.
* The subset is represented as a {@link cc.mallet.types.SparseVector}
* <p>
* A SparseVector represents only the non-zero locations of a vector. In the case of a FeatureVector,
* a location represents the index of an entry in the Alphabet that is contained in
* the FeatureVector.
* <p>
* To loop over the elements of a feature vector, one loops over the consecutive integers between 0
* and the number of locations in the feature vector. From these locations one can cheaply
* obtain the index of the entry in the underlying Alphabet, the entry itself, and the value
* in this feature vector associated the entry.
* <p>
* A SparseVector (or FeatureVector) can be sparse or dense depending on whether or not
 * an array of indices is specified at construction time. If the FeatureVector is dense,
* the mapping from location to index is the identity mapping.
* <p>
* The associated value of an element in a SparseVector (or FeatureVector) can be
* a double or binary (0.0 or 1.0), depending on whether an array of doubles is specified at
 * construction time.
*
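 * <p>
 * A minimal usage sketch (the feature names and values here are illustrative only,
 * not taken from any particular pipeline):
 * <pre>
 * Alphabet dict = new Alphabet ();
 * int[] indices = { dict.lookupIndex ("red"), dict.lookupIndex ("blue") };
 * double[] values = { 2.0, 0.5 };
 * FeatureVector fv = new FeatureVector (dict, indices, values);
 * double v = fv.value ("red"); // 2.0
 * boolean b = fv.contains ("green"); // false
 * </pre>
 *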
* @see SparseVector
* @see Alphabet
*
* @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
public class FeatureVector extends SparseVector implements Serializable, AlphabetCarrying
{
private static Logger logger = MalletLogger.getLogger(FeatureVector.class.getName());
Alphabet dictionary;
protected FeatureVector (Alphabet dict,
int[] indices, double[] values,
int capacity, int size,
boolean copy,
boolean checkIndicesSorted,
boolean removeDuplicates)
{
super (indices, values, capacity, size, copy, checkIndicesSorted, removeDuplicates);
this.dictionary = dict;
}
/** Create a dense vector */
public FeatureVector (Alphabet dict, double[] values)
{
super (values);
this.dictionary = dict;
}
/** Create non-binary vector, possibly dense if "featureIndices" or possibly sparse, if not */
public FeatureVector (Alphabet dict,
int[] featureIndices,
double[] values)
{
super (featureIndices, values);
this.dictionary = dict;
}
/** Create binary vector */
public FeatureVector (Alphabet dict,
int[] featureIndices)
{
super (featureIndices);
this.dictionary = dict;
}
public static int[] getObjectIndices(Object[] entries, Alphabet dict, boolean addIfNotPresent)
{
int[] feats = new int[entries.length];
for (int i = 0; i < entries.length; i++) {
feats[i] = dict.lookupIndex (entries[i], addIfNotPresent);
if (feats[i] == -1)
throw new IllegalArgumentException ("Object is not in dictionary.");
}
return feats;
}
public FeatureVector (Alphabet dict, Object[] keys, double[] values)
{
this (dict, getObjectIndices(keys, dict, true), values);
}
private static int[] sortedFeatureIndexSequence (FeatureSequence fs)
{
int[] feats = fs.toFeatureIndexSequence ();
java.util.Arrays.sort (feats);
return feats;
}
public FeatureVector (FeatureSequence fs, boolean binary)
{
super (fs.toSortedFeatureIndexSequence(), false, false, true, binary);
this.dictionary = (Alphabet) fs.getAlphabet();
}
public FeatureVector (FeatureSequence fs)
{
this (fs, false);
}
public FeatureVector (Alphabet dict, PropertyList pl, boolean binary,
boolean growAlphabet)
{
super (dict, pl, binary, growAlphabet);
this.dictionary = dict;
}
public FeatureVector (Alphabet dict, PropertyList pl, boolean binary) {
this (dict, pl, binary, true);
}
private static int[] indicesWithConjunctions (FeatureVector fv, Alphabet newVocab, int[] conjunctions)
{
assert (fv.values == null); // Only works on binary feature vectors
assert (! (fv instanceof AugmentableFeatureVector));
Alphabet v = fv.getAlphabet();
// newVocab should be an augmented copy of v
assert (v.size() <= newVocab.size())
: "fv.vocab.size="+v.size()+" newVocab.size="+newVocab.size();
int[] newIndices = new int[fv.indices.length * conjunctions.length];
java.util.Arrays.sort (conjunctions);
System.arraycopy (fv.indices, 0, newIndices, 0, fv.indices.length);
int size = fv.indices.length;
int ci = 0;
for (int i = 0; i < fv.indices.length; i++) {
if (ci < conjunctions.length && conjunctions[ci] < fv.indices[i])
ci++;
			if (ci < conjunctions.length && conjunctions[ci] == fv.indices[i]) {
for (int j = 0; j < fv.indices.length; j++) {
if (conjunctions[ci] != fv.indices[j]) {
int index = newVocab.lookupIndex (FeatureConjunction.getName (v, conjunctions[ci], fv.indices[j]));
if (index == newVocab.size()-1 && index % 3 == 0)
logger.info ("New feature "+ newVocab.lookupObject(index));
if (index != -1) // this can be -1 if newVocab.growthStopped
newIndices[size++] = index;
}
}
}
}
// Sort and remove duplicates
Arrays.sort (newIndices, 0, size);
for (int i = 1; i < size; i++) {
if (newIndices[i-1] == newIndices[i]) {
for (int j = i+1; j < size; j++)
newIndices[j-1] = newIndices[j];
size--;
}
}
int[] ret = new int[size];
System.arraycopy (newIndices, 0, ret, 0, size);
return ret;
}
private static int[] indicesWithConjunctions (FeatureVector fv, Alphabet newVocab,
FeatureSelection fsNarrow,
FeatureSelection fsWide)
{
assert (fv.values == null); // Only works on binary feature vectors
////assert (! (fv instanceof AugmentableFeatureVector));
Alphabet v = fv.getAlphabet();
// newVocab should be an augmented copy of v
assert (v.size() <= newVocab.size())
: "fv.vocab.size="+v.size()+" newVocab.size="+newVocab.size();
int length;
if (fv instanceof AugmentableFeatureVector) {
length = ((AugmentableFeatureVector)fv).size;
((AugmentableFeatureVector)fv).sortIndices();
} else {
length = fv.indices.length;
}
int[] newIndices = new int[length * length];
System.arraycopy (fv.indices, 0, newIndices, 0, length);
int size = length;
int ci = 0;
for (int i = 0; i < length; i++) {
if (fsNarrow != null && !fsNarrow.contains (fv.indices[i]))
continue;
for (int j = 0; j < length; j++) {
if ((fsWide == null || fsWide.contains (fv.indices[j]))
&& fv.indices[i] != fv.indices[j]
//&& !FeatureConjunction.featuresOverlap (v, fv.indices[i], fv.indices[j]))
)
{
int index = newVocab.lookupIndex (FeatureConjunction.getName (v, fv.indices[i], fv.indices[j]));
if (index != -1) // this can be -1 if newVocab.growthStopped
newIndices[size++] = index;
}
}
}
// Sort and remove duplicates
Arrays.sort (newIndices, 0, size);
for (int i = 1; i < size; i++) {
if (newIndices[i-1] == newIndices[i]) {
for (int j = i+1; j < size; j++)
newIndices[j-1] = newIndices[j];
size--;
}
}
int[] ret = new int[size];
System.arraycopy (newIndices, 0, ret, 0, size);
return ret;
}
/** New feature vector containing all the features of "fv", plus new
features created by making conjunctions between the features in
"conjunctions" and all the other features. */
public FeatureVector (FeatureVector fv, Alphabet newVocab, int[] conjunctions)
{
this (newVocab, indicesWithConjunctions (fv, newVocab, conjunctions));
}
public FeatureVector (FeatureVector fv, Alphabet newVocab,
FeatureSelection fsNarrow, FeatureSelection fsWide)
{
this (newVocab, indicesWithConjunctions (fv, newVocab, fsNarrow, fsWide));
}
/** Construct a new FeatureVector, selecting only those features in fs, and having new
* (presumably more compact, dense) Alphabet. */
public static FeatureVector newFeatureVector (FeatureVector fv, Alphabet newVocab, FeatureSelection fs)
{
assert (fs.getAlphabet() == fv.dictionary);
if (fv.indices == null) {
throw new UnsupportedOperationException("Not yet implemented for dense feature vectors.");
}
// this numLocations() method call ensures that AugmentableFeatureVectors have been compressed
int fvNumLocations = fv.numLocations();
int[] indices = new int[fvNumLocations];
double[] values = null;
		// fv.values is null when the feature vector is binary; only copy values otherwise
if (fv.values != null) {
values = new double[indices.length];
}
int size = 0;
for (int index = 0; index < fvNumLocations; index++) {
if (fs.contains(fv.indices[index])) {
try{
indices[size] = newVocab.lookupIndex(fv.dictionary.lookupObject(fv.indices[index]), true);
} catch (Exception e) {
System.out.println (e.toString());
}
				// again, values are present only for non-binary feature vectors
if (fv.values != null) {
values[size] = fv.values[index];
}
size++;
}
}
return new FeatureVector (newVocab, indices, values, size, size, true, true, false);
}
// xxx We need to implement this in FeatureVector subclasses
public ConstantMatrix cloneMatrix ()
{
return new FeatureVector ((Alphabet)dictionary, indices, values);
}
public ConstantMatrix cloneMatrixZeroed () {
assert (values != null);
if (indices == null)
return new FeatureVector (dictionary, new double[values.length]);
else {
int[] newIndices = new int[indices.length];
System.arraycopy (indices, 0, newIndices, 0, indices.length);
return new FeatureVector (dictionary, newIndices, new double[values.length],
values.length, values.length, false, false, false);
}
}
public String toString ()
{
return toString (false);
}
// CPAL - added this to output Feature vectors to a text file in a simple format
public boolean toSimpFile (String FileName, int curdocNo, boolean printcounts)
{
//Thread.currentThread().dumpStack();
StringBuffer sb = new StringBuffer ();
//System.out.println ("FeatureVector toString dictionary="+dictionary);
if (values == null) {
//System.out.println ("FeatureVector toString values==null");
int indicesLength = numLocations();
for (int i = 0; i < indicesLength; i++) {
//System.out.println ("FeatureVector toString i="+i);
if (dictionary == null)
sb.append ("["+i+"]");
else {
//System.out.println ("FeatureVector toString: i="+i+" index="+indices[i]);
sb.append (dictionary.lookupObject(indices[i]).toString());
//sb.append ("("+indices[i]+")");
}
//sb.append ("= 1.0 (forced binary)");
//if (!onOneLine)
sb.append ('\n');
//else
// sb.append (' ');
}
} else {
//System.out.println ("FeatureVector toString values!=null");
int valuesLength = numLocations();
for (int i = 0; i < valuesLength; i++) {
int idx = indices == null ? i : indices[i];
if (dictionary == null)
sb.append ("["+i+"]");
else {
//sb.append (dictionary.lookupObject(idx).toString());
//sb.append ("(" + idx +")");
sb.append(curdocNo + " " + idx );
}
//sb.append ("=");
// CPAL - optionally include the counts
if (printcounts)
sb.append (" " + values[i]);
//if (!onOneLine)
sb.append ("\n");
//else
// sb.append (' ');
}
}
//return sb.toString();
String str = sb.toString();
File myfile = new File(FileName);
try{
FileWriter out = new FileWriter(myfile,true); // true -> append to the file
out.write(str);
out.close();
} catch (IOException e) {
System.err.println("Feature Vector exception when trying to print a file");
}
return true;
}
public String toString (boolean onOneLine)
{
//Thread.currentThread().dumpStack();
StringBuffer sb = new StringBuffer ();
//System.out.println ("FeatureVector toString dictionary="+dictionary);
if (values == null) {
//System.out.println ("FeatureVector toString values==null");
int indicesLength = numLocations();
for (int i = 0; i < indicesLength; i++) {
//System.out.println ("FeatureVector toString i="+i);
if (dictionary == null)
sb.append ("["+i+"]");
else {
//System.out.println ("FeatureVector toString: i="+i+" index="+indices[i]);
sb.append (dictionary.lookupObject(indices[i]).toString());
//sb.append ("("+indices[i]+")");
}
//sb.append ("= 1.0 (forced binary)");
if (!onOneLine)
sb.append ('\n');
else
sb.append (' ');
}
} else {
//System.out.println ("FeatureVector toString values!=null");
int valuesLength = numLocations();
for (int i = 0; i < valuesLength; i++) {
int idx = indices == null ? i : indices[i];
if (dictionary == null)
sb.append ("["+i+"]");
else {
sb.append (dictionary.lookupObject(idx).toString());
sb.append ("(" + idx +")");
}
sb.append ("=");
sb.append (values[i]);
if (!onOneLine)
sb.append ("\n");
else
sb.append (' ');
}
}
return sb.toString();
}
public Alphabet getAlphabet ()
{
return dictionary;
}
public Alphabet[] getAlphabets()
{
return new Alphabet[] {dictionary};
}
public boolean alphabetsMatch (AlphabetCarrying object)
{
return dictionary.equals (object.getAlphabet());
}
public int location (Object entry)
{
if (dictionary == null)
throw new IllegalStateException ("This FeatureVector has no dictionary.");
int i = dictionary.lookupIndex (entry, false);
if (i < 0)
return -1;
else
return location (i);
}
public boolean contains (Object entry)
{
int loc = location(entry);
return (loc >= 0 && valueAtLocation(loc) != 0);
}
public double value (Object o)
{
int loc = location (o);
if (loc >= 0)
return valueAtLocation (loc);
else
throw new IllegalArgumentException ("Object "+o+" is not a key in the dictionary.");
}
//Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (dictionary);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
dictionary = (Alphabet) in.readObject();
}
}
| 15,231 | 31.067368 | 112 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/LabelAlphabet.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types;
import java.util.ArrayList;
import java.util.HashMap;
import java.io.*;
import cc.mallet.types.Alphabet;
/**
A mapping from arbitrary objects (usually String's) to integers
(and corresponding Label objects) and back.
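   For example (the label name is illustrative only):
   <pre>
   LabelAlphabet dict = new LabelAlphabet ();
   Label positive = dict.lookupLabel ("positive"); // added, since not yet present
   int index = dict.lookupIndex ("positive"); // same entry, no new label created
   </pre>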
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
public class LabelAlphabet extends Alphabet implements Serializable
{
ArrayList labels;
public LabelAlphabet ()
{
super();
this.labels = new ArrayList ();
}
public int lookupIndex (Object entry, boolean addIfNotPresent)
{
int index = super.lookupIndex (entry, addIfNotPresent);
if (index >= labels.size() && addIfNotPresent)
labels.add (new Label (entry, this, index));
return index;
}
public Label lookupLabel (Object entry, boolean addIfNotPresent)
{
int index = lookupIndex (entry, addIfNotPresent);
if (index >= 0)
return (Label) labels.get(index);
else
return null;
}
public Label lookupLabel (Object entry)
{
return this.lookupLabel (entry, true);
}
public Label lookupLabel (int labelIndex)
{
return (Label) labels.get(labelIndex);
}
}
| 1,567 | 23.888889 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/MultiInstanceList.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types;
import java.io.Serializable;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.ListIterator;
import java.util.NoSuchElementException;
import java.util.Random;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
/**
* An implementation of InstanceList that logically combines multiple instance
* lists so that they appear as one list without copying the original lists.
* This is useful when running cross-validation experiments with large data sets.
*
* Any operation that would modify the size of the list is not supported.
*
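 * <p>
 * A minimal usage sketch (illustrative only; assumes two existing InstanceLists,
 * here called listA and listB, built over the same Pipe):
 * <pre>
 * InstanceList combined = new MultiInstanceList (new InstanceList[] { listA, listB });
 * int total = combined.size ();
 * Instance first = combined.get (0);
 * </pre>
 * 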
* @see InstanceList
*
* @author Michael Bond <a href="mailto:[email protected]">[email protected]</a>
*/
public class MultiInstanceList extends InstanceList {
private static final long serialVersionUID = -7177121200386974657L;
private static final InstanceList[] EMPTY_ARRAY = new InstanceList[0];
private final InstanceList[] lists;
private final int[] offsets;
private class MultiIterator implements Iterator<Instance>, Serializable {
private static final long serialVersionUID = -2446488635289279133L;
int index = 0;
Iterator<Instance> i;
public MultiIterator () {
this.i = lists.length == 0 ? null : lists[0].iterator ();
}
public boolean hasNext () {
if (this.index < lists.length) {
if (this.i.hasNext ()) {
return true;
}
for (int tmpIndex = this.index + 1; tmpIndex < lists.length; tmpIndex++) {
final InstanceList list = lists[tmpIndex];
                    if (list != null && list.size () > 0) {
return true;
}
}
}
return false;
}
public Instance next () {
if (this.index < lists.length) {
if (this.i.hasNext ()) {
return this.i.next ();
}
for (this.index++; this.index < lists.length; this.index++) {
final InstanceList list = lists[this.index];
                    if (list != null && list.size () > 0) {
this.i = lists[this.index].iterator ();
return this.i.next ();
}
}
}
throw new NoSuchElementException ();
}
public void remove () {
throw new UnsupportedOperationException ();
}
}
/**
* Constructs a {@link MultiInstanceList} with an array of {@link InstanceList}
*
* @param lists Array of {@link InstanceList} to logically combine
*/
public MultiInstanceList (InstanceList[] lists) {
super (lists[0].getPipe ());
this.lists = lists;
this.offsets = new int[lists.length];
// build index offsets array and populate instance weights
int offset = 0;
for (int i = 0; i < lists.length; i++) {
this.offsets[i] = offset;
offset += lists[i].size ();
if (lists[i].instWeights != null) {
if (this.instWeights == null) {
this.instWeights = new HashMap<Instance,Double> ();
}
                this.instWeights.putAll (lists[i].instWeights);
}
}
}
/**
* Constructs a {@link MultiInstanceList} with a {@link List} of {@link InstanceList}
*
* @param lists List of {@link InstanceList} to logically combine
*/
public MultiInstanceList (List<InstanceList> lists) {
this (lists.toArray (EMPTY_ARRAY));
}
public boolean add (Instance instance, double instanceWeight) {
throw new UnsupportedOperationException ();
}
public boolean add (Instance instance) {
throw new UnsupportedOperationException ();
}
public void add (int index, Instance element) {
throw new UnsupportedOperationException ();
}
public void clear () {
throw new UnsupportedOperationException ();
}
public Object clone () {
InstanceList[] newLists = new InstanceList[this.lists.length];
for (int i = 0; i < this.lists.length; i++) {
newLists[i] = (InstanceList) this.lists[i].clone ();
}
return new MultiInstanceList (newLists);
}
public InstanceList cloneEmpty () {
InstanceList[] newLists = new InstanceList[this.lists.length];
for (int i = 0; i < this.lists.length; i++) {
newLists[i] = this.lists[i].cloneEmpty ();
}
return new MultiInstanceList (newLists);
}
protected InstanceList cloneEmptyInto (InstanceList ret) {
throw new UnsupportedOperationException ();
}
public boolean contains (Object elem) {
for (InstanceList list : this.lists) {
if (list != null && list.contains (elem)) {
return true;
}
}
return false;
}
public CrossValidationIterator crossValidationIterator (int nfolds, int seed) {
throw new UnsupportedOperationException ();
}
public CrossValidationIterator crossValidationIterator (int nfolds) {
throw new UnsupportedOperationException ();
}
public void ensureCapacity (int minCapacity) {
throw new UnsupportedOperationException ();
}
public boolean equals (Object o) {
if (o instanceof MultiInstanceList) {
MultiInstanceList tmp = (MultiInstanceList) o;
if (tmp.lists.length != this.lists.length) {
return false;
}
for (int i = 0; i < this.lists.length; i++) {
InstanceList thisList = this.lists[i];
InstanceList tmpList = tmp.lists[i];
                if (thisList == null) {
                    if (tmpList != null) {
                        return false;
                    }
                } else if (!thisList.equals (tmpList)) {
                    return false;
                }
}
return true;
}
return false;
}
//@Override
public Instance get (int index) {
int i = getOffsetIndex (index);
return this.lists[i].get (index - this.offsets[i]);
}
/**
* Gets the index into the offsets array for the given element index
*
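     * For example, with offsets {0, 10, 25}, element index 12 maps to offset
     * index 1, since the second list covers element indices 10 through 24.
     * 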
* @param index Index of element
* @return Index into offsets, will always give a valid index
*/
private int getOffsetIndex (int index) {
int i = Arrays.binarySearch (this.offsets, index);
if (i < 0) {
i = (-i) - 2;
}
return i;
}
//@Override
public int hashCode () {
int hashCode = 1;
for (InstanceList list : this.lists) {
hashCode = 31*hashCode + (list==null ? 0 : list.hashCode ());
}
return hashCode;
}
//@Override
public int indexOf (Object elem) {
for (int i = 0; i < this.lists.length; i++) {
int index = this.lists[i].indexOf (elem);
if (index != -1) {
return index + this.offsets[i];
}
}
return -1;
}
//@Override
    public boolean isEmpty () {
        for (InstanceList list : this.lists) {
            if (list != null && !list.isEmpty ()) {
                return false;
            }
        }
        return true;
    }
//@Override
public Iterator<Instance> iterator () {
return new MultiIterator ();
}
//@Override
public int lastIndexOf (Object elem) {
for (int i = this.lists.length - 1; i >= 0; i--) {
int index = this.lists[i].lastIndexOf (elem);
if (index != -1) {
return index + this.offsets[i];
}
}
return -1;
}
//@Override
public ListIterator<Instance> listIterator () {
throw new UnsupportedOperationException ();
}
//@Override
public ListIterator<Instance> listIterator (int index) {
throw new UnsupportedOperationException ();
}
//@Override
public boolean remove (Instance instance) {
throw new UnsupportedOperationException ();
}
//@Override
public Instance remove (int index) {
throw new UnsupportedOperationException ();
}
//@Override
public boolean remove (Object o) {
throw new UnsupportedOperationException ();
}
//@Override
public Instance set (int index, Instance instance) {
int i = getOffsetIndex (index);
return this.lists[i].set (index - this.offsets[i], instance);
}
//@Override
public void setInstance (int index, Instance instance) {
int i = getOffsetIndex (index);
this.lists[i].setInstance (index - this.offsets[i], instance);
}
//@Override
public void setInstanceWeight (Instance instance, double weight) {
super.setInstanceWeight (instance, weight);
int index = indexOf (instance);
int i = getOffsetIndex (index);
this.lists[i].setInstanceWeight (index - this.offsets[i], weight);
}
//@Override
public InstanceList shallowClone () {
InstanceList[] newLists = new InstanceList[this.lists.length];
for (int i = 0; i < this.lists.length; i++) {
newLists[i] = this.lists[i].shallowClone ();
}
return new MultiInstanceList (newLists);
}
//@Override
public void shuffle (Random r) {
throw new UnsupportedOperationException ();
}
//@Override
public int size () {
int size = 0;
for (InstanceList list : this.lists) {
if (list != null) {
size += list.size ();
}
}
return size;
}
//@Override
public InstanceList[] split (double[] proportions) {
throw new UnsupportedOperationException ();
}
//@Override
public InstanceList[] split (Random r, double[] proportions) {
throw new UnsupportedOperationException ();
}
//@Override
public InstanceList[] splitInOrder (double[] proportions) {
throw new UnsupportedOperationException ();
}
//@Override
public InstanceList[] splitInOrder (int[] counts) {
throw new UnsupportedOperationException ();
}
//@Override
public InstanceList[] splitInTwoByModulo (int m) {
throw new UnsupportedOperationException ();
}
//@Override
public InstanceList subList (double proportion) {
throw new UnsupportedOperationException ();
}
//@Override
public InstanceList subList (int start, int end) {
throw new UnsupportedOperationException ();
}
//@Override
public Object[] toArray () {
Object[] result = new Object[size ()];
int i = 0;
for (InstanceList list : this.lists) {
if (list != null) {
for (Instance instance : list) {
result[i++] = instance;
}
}
}
return result;
}
@SuppressWarnings("unchecked")
//@Override
public <T> T[] toArray (T[] a) {
int size = size ();
if (a.length < size) {
a = (T[])java.lang.reflect.Array
.newInstance (a.getClass ().getComponentType (), size);
}
Object[] result = a;
int i = 0;
for (InstanceList list : this.lists) {
if (list != null) {
for (Instance instance : list) {
result[i++] = instance;
}
}
}
if (a.length > size)
a[size] = null;
return a;
}
//@Override
public String toString () {
StringBuffer buf = new StringBuffer ();
buf.append ("[");
for (int listIndex = 0; listIndex < this.lists.length; listIndex++) {
if (this.lists[listIndex] != null) {
Iterator<Instance> i = this.lists[listIndex].iterator ();
boolean hasNext = i.hasNext ();
while (hasNext) {
Instance o = i.next ();
buf.append (String.valueOf (o));
hasNext = i.hasNext ();
                    if (listIndex < this.lists.length - 1 || hasNext) {
buf.append (", ");
}
}
}
}
buf.append ("]");
return buf.toString ();
}
//@Override
public void trimToSize () {
for (InstanceList list : this.lists) {
list.trimToSize ();
}
}
}
| 13,220 | 28.38 | 90 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/StringEditVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.logging.*;
import java.util.StringTokenizer;
import java.io.*;
import cc.mallet.util.MalletLogger;
public class StringEditVector implements Serializable
{
private static Logger logger = MalletLogger.getLogger(StringEditVector.class.getName());
String _delimiter;
String _string1 = null, _string2 = null;
int _match = -2;
public static final int MATCH = 1;
public static final int NONMATCH = 0;
public StringEditVector(String delimiter) {
if (delimiter == null || delimiter.equals(""))
_delimiter = " ";
else
_delimiter = delimiter;
}
public StringEditVector() {
this (" ");
}
public String formatString() {
return "<String1>" + _delimiter + "<String2>" + _delimiter + "<BooleanMatch>";
}
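	// For example, with the default delimiter (" "), parseString("kitten sitting 0")
	// sets the first string to "kitten", the second to "sitting" and the match flag
	// to 0, returning true. (The strings themselves are illustrative only.)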
public boolean parseString(String line) {
StringTokenizer stok = new StringTokenizer(line, _delimiter);
boolean success = true;
// First String
if (stok.hasMoreTokens()) _string1 = stok.nextToken();
else success = false;
// Second String
if (stok.hasMoreTokens()) _string2 = stok.nextToken();
else success = false;
// Match/non-Match
if (stok.hasMoreTokens())
try {
_match = Integer.parseInt(stok.nextToken());
}
catch (Exception e) {
logger.info ("Error while returning third integer - " + e.getMessage());
_match = -1;
success = false;
}
else success = false;
return success;
}
public void setFirstString(String s1) {
_string1 = s1;
}
public String getFirstString() {
return _string1;
}
public char getFirstStringChar(int index) {
index = index - 1;
if (index < 0 || index >= _string1.length()) return (char) 0;
else return _string1.charAt(index);
}
public int getLengthFirstString() {
return _string1.length();
}
public void setSecondString(String s2) {
_string2 = s2;
}
public String getSecondString() {
return _string2;
}
public char getSecondStringChar(int index) {
index = index - 1;
if (index < 0 || index >= _string2.length()) return (char) 0;
else return _string2.charAt(index);
}
public int getLengthSecondString() {
return _string2.length();
}
public void setMatch(int match) {
_match = match;
}
public int getMatch() {
return _match;
}
//Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (_delimiter);
out.writeObject (_string1);
out.writeObject (_string2);
out.writeInt (_match);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
_delimiter = (String) in.readObject();
_string1 = (String) in.readObject();
_string2 = (String) in.readObject();
_match = in.readInt();
}
}
| 3,525 | 24.185714 | 93 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/ArraySequence.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.ArrayList;
import cc.mallet.types.Sequence;
public class ArraySequence<E> implements Sequence<E>
{
E[] data;
public ArraySequence (ArrayList<E> a)
{
data = (E[])new Object[a.size()];
for (int i = 0; i < a.size(); i++)
data[i] = a.get(i);
}
public ArraySequence (E[] a, boolean copy)
{
if (copy) {
data = (E[])new Object[a.length];
System.arraycopy (a, 0, data, 0, a.length);
} else
data = a;
}
public ArraySequence (E[] a)
{
this (a, true);
}
protected ArraySequence (Sequence<E> s, boolean copy)
{
if (s instanceof ArraySequence) {
if (copy) {
data = (E[])new Object[s.size()];
System.arraycopy (((ArraySequence)s).data, 0, data, 0, data.length);
} else
data = ((ArraySequence<E>)s).data;
} else {
data = (E[])new Object[s.size()];
for (int i = 0; i < s.size(); i++)
data[i] = s.get(i);
}
}
public E get (int index)
{
return data[index];
}
public int size ()
{
return data.length;
}
public String toString() {
String toret = "";
for (int i = 0; i < data.length; i++) {
toret += " " + data[i];
}
return toret;
}
}
| 1,696 | 20.2125 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/PagedInstanceList.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.rmi.dgc.VMID;
import java.util.BitSet;
import java.util.Map;
import cc.mallet.pipe.Noop;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.MatrixOps;
/**
An InstanceList which avoids OutOfMemoryErrors by saving Instances
to disk when there is not enough memory to create a new
Instance. It implements a fixed-size paging scheme, where each page
on disk stores <code>instancesPerPage</code> Instances. So, while
   the number of Instances per page is constant, the size in bytes of
each page may vary. Using this class instead of InstanceList means
the number of Instances you can store is essentially limited only
by disk size (and patience).
The paging scheme is optimized for the most frequent case of
   looping through the InstanceList from index 0 to n-1. Instances
   0 through (instancesPerPage - 1) are stored together on page 0,
   instances instancesPerPage through (2 * instancesPerPage - 1) are on
   page 1, and so on. This way, instances adjacent in the
   <code>instances</code> list will usually be on the same page.
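   <p>
   A minimal usage sketch (illustrative values only; assumes an existing Pipe
   <code>pipe</code> and Instance <code>instance</code>):
   <pre>
   // keep 2 pages of 1000 instances each in memory, swapping to directory "swap"
   InstanceList ilist = new PagedInstanceList (pipe, 2, 1000, new File ("swap"));
   ilist.add (instance);
   </pre>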
@see InstanceList
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
public class PagedInstanceList extends InstanceList
{
private static final char TYPE_FEATURE_VECTOR = 'F';
private static final char TYPE_LABEL = 'L';
private static final char TYPE_OBJECT = 'O';
/** number of instances to put in one page */
int instancesPerPage;
/** directory to store swap files */
File swapDir;
/** array of page numbers that represent the in-memory pages */
int[] inMemoryPageIds;
/** array of instance lists that represent the in-memory pages */
InstanceList[] inMemoryPages;
/** dirty.get(i) == true if in-memory bin i is dirty */
BitSet dirty = new BitSet();
/** Total number of instances in list, including those swapped out */
int size = 0;
/** recommend garbage collection after every swap out? */
boolean collectGarbage = true;
/** Total number of swap-ins */
int swapIns = 0;
/** Total time spent in swap-ins */
long swapInTime = 0;
/** Total number of swap-outs */
int swapOuts = 0;
/** Total time spent in swap-ins */
long swapOutTime = 0;
/** uniquely identifies this InstanceList. Used in creating
* serialized page name for swap files. */
VMID id = new VMID();
/** Avoids creating a new noop pipe for each page */
Pipe noopPipe;
// CONSTRUCTORS
    /** Creates a PagedInstanceList that stores "instancesPerPage" instances
     * per page, keeping at most "numPages" pages in memory at once and
     * swapping the remaining pages to disk in directory "swapDir".
* @param pipe instance pipe
* @param numPages number of pages to keep in memory
* @param instancesPerPage number of Instances to store in each page
* @param swapDir where the pages on disk live.
*/
public PagedInstanceList (Pipe pipe, int numPages, int instancesPerPage, File swapDir) {
super (pipe, numPages * instancesPerPage);
this.instancesPerPage = instancesPerPage;
this.swapDir = swapDir;
this.inMemoryPageIds = new int[numPages];
this.inMemoryPages = new InstanceList[numPages];
this.noopPipe = new Noop(pipe.getDataAlphabet(), pipe.getTargetAlphabet());
for (int i = 0; i < numPages; i++) {
this.inMemoryPageIds[i] = -1;
}
try {
if (!swapDir.exists()) {
swapDir.mkdir();
}
} catch (SecurityException e) {
System.err.println ("No permission to make directory " + swapDir);
System.exit(-1);
}
}
public PagedInstanceList (Pipe pipe, int numPages, int instancesPerPage) {
this (pipe, numPages, instancesPerPage, new File ("."));
}
// SPLITTING AND SAMPLING METHODS
/** Shuffles elements of an array, taken from Collections.shuffle
* @param r The source of randomness to use in shuffling.
* @param a Array to shuffle
*/
private void shuffleArray (java.util.Random r, int[] a) {
int size = a.length;
// Shuffle array
for (int i = size - 1; i > 0; i--) {
int swap = r.nextInt(i + 1);
int tmp = a[i];
a[i] = a[swap];
a[swap] = tmp;
}
}
/**
* Shuffles the elements of this list among several smaller
* lists. Overrides InstanceList.split to add instances in original
* order, to prevent thrashing.
* @param proportions A list of numbers (not necessarily summing to 1) which,
* when normalized, correspond to the proportion of elements in each returned
* sublist.
* @param r The source of randomness to use in shuffling.
* @return one <code>InstanceList</code> for each element of <code>proportions</code>
*/
public InstanceList[] split (java.util.Random r, double[] proportions) {
InstanceList[] ret = new InstanceList[proportions.length];
double maxind[] = proportions.clone();
int size = size();
int[] shuffled = new int[size];
int[] splits = new int[size];
// build a list of shuffled instance indexes
for (int i = 0; i < size; i++) {
shuffled[i] = i;
}
shuffleArray(r, shuffled);
MatrixOps.normalize(maxind);
for (int i = 0; i < maxind.length; i++) {
ret[i] = this.cloneEmpty(); // Note that we are passing on featureSelection here.
if (i > 0)
maxind[i] += maxind[i-1];
}
for (int i = 0; i < maxind.length; i++) {
// Fill maxind[] with the highest instance index to go in each corresponding returned InstanceList
maxind[i] = Math.rint (maxind[i] * size);
}
for (int i = 0, j = 0; i < size; i++) {
// This gives a slight bias toward putting an extra instance in the last InstanceList.
while (i >= maxind[j] && j < ret.length)
j++;
splits[shuffled[i]] = j;
}
for (int i = 0; i < size; i++) {
//logger.info ("adding instance " + i + " to split ilist " + splits[i]);
ret[splits[i]].add(this.get(i));
}
return ret;
}
// PAGING METHODS
/** Gets the swap file for the specified page
* @param page Page to get swap file for
* @return Swap file
*/
private File getFileForPage (int page) {
return new File (swapDir, id + "." + page);
}
/** Gets the page for the specified instance index, swapping in if necessary
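     * <p> For example, with <code>instancesPerPage</code> = 1000 and two in-memory
     * pages, instance index 2500 lives on page 2, which swapIn maps to bin 2 % 2 = 0.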
* @param index Instance index to get page for
* @param dirty If true mark page as dirty
* @return Page for the specified instance index
*/
private InstanceList getPageForIndex (int index, boolean dirty) {
if (index > this.size) {
throw new IndexOutOfBoundsException (
"Index: " + index + ", Size: "+ this.size);
}
return swapIn (index / this.instancesPerPage, dirty);
}
/** Swaps in the specified page
* @param pageId Page to swap in
* @param dirty If true mark page as dirty
* @return The page that was just swapped in */
private InstanceList swapIn (int pageId, boolean dirty) {
int bin = pageId % this.inMemoryPages.length;
if (this.inMemoryPageIds[bin] != pageId) {
swapOut (this.inMemoryPageIds[bin]);
long startTime = System.currentTimeMillis ();
File pageFile = getFileForPage (pageId);
ObjectInputStream in = null;
try {
in = new ObjectInputStream (new FileInputStream (pageFile));
InstanceList page = deserializePage(in);
this.inMemoryPageIds[bin] = pageId;
this.inMemoryPages[bin] = page;
} catch (Exception e) {
System.err.println (e);
System.exit (-1);
} finally {
if (in != null) {
try {
in.close();
} catch (Exception e) {
System.err.println (e);
System.exit (-1);
}
}
}
this.swapIns++;
this.swapInTime += System.currentTimeMillis () - startTime;
}
if (dirty) {
this.dirty.set (bin);
}
return this.inMemoryPages[bin];
}
/** Swaps out the page in the specified bin if it is dirty
* @param pageId Page to swap out
*/
private void swapOut (int pageId) {
int bin = pageId % this.inMemoryPages.length;
if (pageId != -1 && this.dirty.get (bin)) {
long startTime = System.currentTimeMillis ();
File pageFile = getFileForPage (pageId);
ObjectOutputStream out = null;
try {
out = new ObjectOutputStream (new FileOutputStream (pageFile));
InstanceList page = this.inMemoryPages[bin];
this.inMemoryPageIds[bin] = -1;
this.inMemoryPages[bin] = null;
serializePage(out, page);
this.dirty.set(bin, false);
} catch (Exception e) {
System.err.println (e);
System.exit (-1);
} finally {
if (out != null) {
try {
out.close ();
}
catch (Exception e) {
System.err.println (e);
System.exit (-1);
}
}
}
if (this.collectGarbage) {
System.gc();
}
this.swapOuts++;
this.swapOutTime += System.currentTimeMillis () - startTime;
}
}
// ACCESSORS
/** Appends the instance to this list. Note that since memory for
* the Instance has already been allocated, no check is made to
* catch OutOfMemoryError.
* @return <code>true</code> if successful
*/
public boolean add (Instance instance) {
InstanceList page;
if (this.size % this.instancesPerPage == 0) {
            // this is the start of a new page; swap out the one in this page's
            // spot and create a new one
int pageId = this.size / this.instancesPerPage;
int bin = pageId % this.inMemoryPages.length;
swapOut (this.inMemoryPageIds[bin]);
page = new InstanceList (this.noopPipe);
this.inMemoryPageIds[bin] = pageId;
this.inMemoryPages[bin] = page;
} else {
page = getPageForIndex (this.size, true);
}
boolean ret = page.add (instance);
if (ret) {
this.size++;
}
return ret;
}
/** Returns the <code>Instance</code> at the specified index. If
* this Instance is not in memory, swap a block of instances back
* into memory. */
public Instance get (int index) {
InstanceList page = getPageForIndex (index, false);
return page.get (index % this.instancesPerPage);
}
/** Replaces the <code>Instance</code> at position
* <code>index</code> with a new one. Note that this is the only
* sanctioned way of changing an Instance. */
public Instance set (int index, Instance instance) {
InstanceList page = getPageForIndex (index, true);
return page.set (index % this.instancesPerPage, instance);
}
public boolean getCollectGarbage () {
return this.collectGarbage;
}
public void setCollectGarbage (boolean b) {
this.collectGarbage = b;
}
public InstanceList shallowClone () {
InstanceList ret = this.cloneEmpty ();
for (int i = 0; i < this.size (); i++) {
ret.add (get (i));
}
return ret;
}
public InstanceList cloneEmpty () {
return super.cloneEmptyInto (new PagedInstanceList (
this.pipe,
this.inMemoryPages.length,
this.instancesPerPage,
this.swapDir));
}
public void clear () {
int numPages = this.size / this.instancesPerPage;
for (int i = 0; i <= numPages; i++) {
getFileForPage (i).delete ();
}
for (int i = 0; i < this.inMemoryPages.length; i++) {
this.inMemoryPages[i] = null;
this.inMemoryPageIds[i] = -1;
}
this.size = 0;
this.swapIns = 0;
this.swapInTime = 0;
this.swapOuts = 0;
this.swapOutTime = 0;
this.dirty.clear ();
super.clear ();
}
public int getSwapIns () {
return this.swapIns;
}
public long getSwapInTime () {
return this.swapInTime;
}
public int getSwapOuts () {
return this.swapOuts;
}
public long getSwapOutTime () {
return this.swapOutTime;
}
public int size () {
return this.size;
}
/** Serializes a single object without metadata
* @param out
* @param object
* @throws IOException
*/
private void serializeObject (ObjectOutputStream out, Object obj)
throws IOException {
if (obj instanceof FeatureVector) {
FeatureVector features = (FeatureVector) obj;
out.writeChar (TYPE_FEATURE_VECTOR);
out.writeObject (features.getIndices ());
out.writeObject (features.getValues ());
}
else if (obj instanceof Label) {
out.writeChar (TYPE_LABEL);
out.writeObject (((Label) obj).toString ());
} else {
out.writeChar (TYPE_OBJECT);
out.writeObject (obj);
}
}
/** Serialize a page without metadata. This attempts to serialize the
* minimum amount needed to restore the page, leaving out redundant data
* such as pipes and dictionaries.
* @param out Object output stream
* @param page
* @throws IOException
*/
private void serializePage (ObjectOutputStream out, InstanceList page)
throws IOException {
out.writeInt (page.size ());
for (Instance inst : page) {
serializeObject (out, inst.getData ());
serializeObject (out, inst.getTarget ());
out.writeObject (inst.getName ());
out.writeObject (inst.getSource ());
if (this.instWeights != null) {
Double weight = this.instWeights.get (inst);
if (weight != null) {
out.writeDouble (this.instWeights.get (inst));
} else {
out.writeDouble (1.0);
}
} else {
out.writeDouble (1.0);
}
}
}
/** Deserialize an object serialized using
* {@link #serializeObject(ObjectOutputStream, Object)}.
* @throws IOException
* @throws ClassNotFoundException
*/
private Object deserializeObject (ObjectInputStream in)
throws IOException, ClassNotFoundException {
char type = in.readChar ();
Object obj;
switch (type) {
case TYPE_LABEL:
LabelAlphabet ldict = (LabelAlphabet) getTargetAlphabet ();
String name = (String) in.readObject ();
obj = ldict.lookupLabel (name);
break;
case TYPE_FEATURE_VECTOR:
int[] indices = (int[]) in.readObject ();
double[] values = (double[]) in.readObject ();
obj = new FeatureVector(getDataAlphabet (), indices, values);
break;
case TYPE_OBJECT:
obj = in.readObject ();
break;
default:
throw new IOException ("Unknown object type " + type);
}
return obj;
}
/** Deserialize a page. This restores a page serialized using
* {@link #serializePage(ObjectOutputStream, InstanceList)}.
* @param in Object input stream
* @return New page
* @throws IOException
* @throws ClassNotFoundException
*/
private InstanceList deserializePage(ObjectInputStream in)
throws IOException, ClassNotFoundException {
InstanceList page = new InstanceList(noopPipe);
int size = in.readInt();
for (int i = 0; i < size; i++) {
Object data = deserializeObject (in);
Object target = deserializeObject (in);
Object name = in.readObject ();
Object source = in.readObject ();
double weight = in.readDouble ();
page.add (new Instance (data, target, name, source), weight);
}
return page;
}
/** Constructs a new <code>InstanceList</code>, deserialized from
* <code>file</code>. If the string value of <code>file</code> is
* "-", then deserialize from {@link System.in}. */
public static InstanceList load (File file) {
try {
ObjectInputStream ois;
if (file.toString ().equals ("-"))
ois = new ObjectInputStream (System.in);
else
ois = new ObjectInputStream (new FileInputStream (file));
PagedInstanceList ilist = (PagedInstanceList) ois.readObject();
ois.close();
return ilist;
} catch (Exception e) {
e.printStackTrace ();
throw new IllegalArgumentException ("Couldn't read PagedInstanceList from file "+file);
}
}
// Serialization of PagedInstanceList
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (this.id);
out.writeObject (this.pipe);
// memory attributes
out.writeInt (this.instancesPerPage);
out.writeObject (this.swapDir);
out.writeObject(this.inMemoryPageIds);
out.writeObject (this.dirty);
for (int i = 0; i < this.inMemoryPages.length; i++) {
serializePage(out, this.inMemoryPages[i]);
}
}
    private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
        int version = in.readInt ();  // consume the serial version written by writeObject
        this.id = (VMID) in.readObject ();
this.pipe = (Pipe) in.readObject();
// memory attributes
this.instancesPerPage = in.readInt ();
this.swapDir = (File) in.readObject ();
this.inMemoryPageIds = (int[]) in.readObject();
this.dirty = (BitSet) in.readObject ();
this.inMemoryPages = new InstanceList[this.inMemoryPageIds.length];
for (int i = 0; i < this.inMemoryPageIds.length; i++) {
this.inMemoryPages[i] = deserializePage(in);
}
}
}
| 19,955 | 33.525952 | 110 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Labeler.java | package cc.mallet.types;
public interface Labeler {
/** Given the (presumably unlabeled) instanceToLabel, set its target field to the true label.
* @return true if labeling occurred successfully, false if for some reason the instance could not be labeled. */
public boolean label (Instance instanceToLabel);
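	/* A minimal implementation sketch (goldLabels and the setTarget call are
	 * illustrative assumptions, not part of this interface):
	 *
	 *   public boolean label (Instance instanceToLabel) {
	 *     Label gold = goldLabels.get (instanceToLabel.getName ());
	 *     if (gold == null) return false;        // unable to label this instance
	 *     instanceToLabel.setTarget (gold);
	 *     return true;
	 *   }
	 */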
}
| 315 | 38.5 | 114 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Minkowski.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/** Minkowski Metric, also known as the L_p norm. Special cases include the
 * Manhattan city-block distance with q=1 and the Euclidean distance with
* q=2. The special case with q equal to positive infinity is supported.
*
* @author Jerod Weinman <A HREF="mailto:[email protected]">[email protected]</A>
*/
package cc.mallet.types;
import java.lang.Math;
import cc.mallet.types.SparseVector;
public class Minkowski implements Metric {
double q;
/** Constructor for Minkowski metric.
*
     * @param q  Power of the component-wise absolute difference; must be at least 1
*/
public Minkowski( double q )
{
if (q<1)
throw new IllegalArgumentException("Argument q must be at least 1.");
//assert( q>= 1 );
this.q = q;
}
/** Gives the Minkowski distance between two vectors.
*
     *  distance(x,y) := \left( \sum_{i=0}^{d-1} \left| x_i - y_i \right|^q \right)^{\frac{1}{q}}
*
* for 1<=q<infinity. For q=infinity
*
* distance(x,y) := max_i \left| x_i - y_i \right|
*/
public double distance( SparseVector a, SparseVector b)
{
double dist = 0;
double diff;
if (a==null || b==null)
throw new IllegalArgumentException("Distance from a null vector is undefined.");
//assert (a != null);
//assert (b != null);
if (a.numLocations() != b.numLocations() )
throw new IllegalArgumentException("Vectors must be of the same dimension.");
//assert (a.numLocations() == b.numLocations() );
for (int i=0 ; i< a.numLocations() ; i++ )
{
diff = Math.abs( a.valueAtLocation(i) - b.valueAtLocation(i));
if (q==1)
dist += diff;
else if (q==2)
dist += diff*diff;
else if (q==Double.POSITIVE_INFINITY)
if ( diff > dist)
dist = diff;
else
dist += Math.pow( diff, q );
}
if (q==1 || q==Double.POSITIVE_INFINITY)
return dist;
else if (q==2)
return Math.sqrt( dist );
else
return Math.pow( dist, 1/q);
}
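    /* Usage sketch (v1 and v2 are hypothetical SparseVectors of equal dimension):
     *
     *   Metric manhattan = new Minkowski (1);
     *   Metric euclidean = new Minkowski (2);
     *   Metric chebyshev = new Minkowski (Double.POSITIVE_INFINITY);
     *   double d = euclidean.distance (v1, v2);
     */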
public double euclideanDistance(SparseVector a, SparseVector b) {
double dist = 0;
double diff;
if (a==null || b==null)
throw new IllegalArgumentException("Distance from a null vector is undefined.");
int aLen = a.numLocations();
int bLen = b.numLocations();
int ia = 0;
int ib = 0;
int indicea, indiceb;
while (ia < aLen && ib < bLen) {
indicea = a.indexAtLocation(ia);
indiceb = b.indexAtLocation(ib);
if(indicea < indiceb) {
diff = a.valueAtLocation(ia);
ia ++;
}
else {
if(indicea == indiceb) {
diff = Math.abs(a.valueAtLocation(ia) - b.valueAtLocation(ib));
ia ++;
ib ++;
}
else
{
diff = b.valueAtLocation(ib);
ib ++;
}
}
dist += diff * diff;
}
	while(ia < aLen) {
	    diff = a.valueAtLocation(ia);
	    dist += diff * diff;
	    ia ++;
	}
	while(ib < bLen) {
	    diff = b.valueAtLocation(ib);
	    dist += diff * diff;
	    ib ++;
	}
dist = Math.sqrt(dist);
return dist;
}
}
| 3,362 | 23.911111 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/LabelSequence.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.io.*;
import cc.mallet.types.FeatureVectorSequence.Iterator;
public class LabelSequence extends FeatureSequence implements AlphabetCarrying, Serializable
{
public LabelSequence (LabelAlphabet dict, int[] features)
{
super (dict, features);
}
public LabelSequence (LabelAlphabet dict, int capacity)
{
super (dict, capacity);
}
private static int[] getFeaturesFromLabels (Label[] labels)
{
int[] features = new int[labels.length];
for (int i = 0; i < labels.length; i++)
features[i] = labels[i].getIndex();
return features;
}
public LabelSequence (Label[] labels)
{
super (labels[0].getLabelAlphabet(), getFeaturesFromLabels (labels));
}
public LabelSequence (Alphabet dict)
{
super (dict);
}
public LabelAlphabet getLabelAlphabet () { return (LabelAlphabet) dictionary; }
public Label getLabelAtPosition (int pos)
{
return ((LabelAlphabet)dictionary).lookupLabel (features[pos]);
}
public class Iterator implements java.util.Iterator {
int pos;
public Iterator () {
pos = 0;
}
public Object next() {
return getLabelAtPosition(pos++);
}
public int getIndex () {
return pos;
}
public boolean hasNext() {
return pos < features.length;
}
public void remove () {
throw new UnsupportedOperationException ();
}
}
public Iterator iterator ()
{
return new Iterator();
}
// ???
//public Object get (int pos) { return getLabelAtPosition (pos); }
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
}
}
| 2,377 | 21.647619 | 92 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/AugmentableFeatureVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import cc.mallet.util.PropertyList;
public class AugmentableFeatureVector extends FeatureVector implements Serializable
{
int size; // max index with valid indices[] or values[] value
int maxSortedIndex; /* if indices != null, top of values[] and indices[]
may be unsorted indices. */
// xxx Also make constructors for dense vectors, and add the appropriate
// functionality in methods below.
/** To make a binary vector, pass null for "values" */
public AugmentableFeatureVector (Alphabet dict,
int[] indices, double[] values,
int capacity, int size,
boolean copy, boolean checkIndicesSorted,
boolean removeDuplicates)
{
super (dict, indices, values, capacity, size, copy, checkIndicesSorted, removeDuplicates);
// set this.size and this.maxSortedIndex if not already set via sortIndices and removeDuplicates
if (! checkIndicesSorted) {
if (! removeDuplicates) {
this.size = size;
}
this.maxSortedIndex = this.size - 1;
}
}
public AugmentableFeatureVector (Alphabet dict,
int[] indices, double[] values, int capacity, boolean copy,
boolean checkIndicesSorted) {
this (dict, indices, values, capacity, indices.length, copy, checkIndicesSorted, true);
}
public AugmentableFeatureVector (Alphabet dict,
int[] indices, double[] values, int capacity, boolean copy) {
this (dict, indices, values, capacity, indices.length, copy, true, true); }
public AugmentableFeatureVector (Alphabet dict,
int[] indices, double[] values, int capacity) {
this (dict, indices, values, capacity, indices.length, true, true, true); }
public AugmentableFeatureVector (Alphabet dict, double[] values, int capacity) {
this (dict, null, values, capacity, values.length, true, true, true); }
public AugmentableFeatureVector (Alphabet dict, double[] values) {
this (dict, null, values, values.length, values.length, true, true, true); }
public AugmentableFeatureVector (Alphabet dict, int capacity, boolean binary) {
// yyy
this (dict, new int[capacity], binary ? null : new double[capacity],
capacity, 0, false, false, false); }
public AugmentableFeatureVector (Alphabet dict, boolean binary) {
this (dict, 4, binary); }
public AugmentableFeatureVector (Alphabet dict) {
this (dict, false); }
public AugmentableFeatureVector (FeatureVector fv) {
this ((Alphabet)fv.dictionary, fv.indices, fv.values,
fv.indices == null ? fv.values.length : fv.indices.length,
fv.indices == null ? fv.values.length : fv.indices.length,
true, false, false);
}
public AugmentableFeatureVector (FeatureSequence fs, boolean binary) {
this (fs.getAlphabet(), binary);
for (int i = fs.size()-1; i >= 0; i--)
add (fs.getIndexAtPosition(i), 1.0);
}
public AugmentableFeatureVector (Alphabet dict, PropertyList pl, boolean binary,
boolean growAlphabet) {
this (dict, binary);
if (pl == null)
return;
PropertyList.Iterator iter = pl.numericIterator();
while (iter.hasNext()) {
iter.nextProperty();
//System.out.println ("AugmentableVector ("+dict.size()+") adding "+iter.getKey()+" "+iter.getNumericValue());
int index = dict.lookupIndex (iter.getKey(), growAlphabet);
if (index >= 0)
add (index, iter.getNumericValue());
}
}
public AugmentableFeatureVector (Alphabet dict, PropertyList pl, boolean binary) {
this (dict, pl, binary, true);
}
/**
* Adds all indices that are present in some other feature vector
* with value 1.0.
* Beware that this may have unintended effects if
* <tt>fv.dictionary != this.dictionary</tt>
*/
public void add (FeatureVector fv)
{
for (int loc = 0; loc < fv.numLocations (); loc++) {
int index = fv.indexAtLocation (loc);
// [email protected] 3/5/10
// use values, instead of assuming fv is binary
double value = fv.valueAtLocation(loc);
if (location (index) == -1) {
//add (index, 1.0);
add(index,value);
}
}
}
/**
* Adds all features from some other feature vector with weight 1.0.
* The names of the added features are generated by adding a prefix to
* their names in the original feature vector.
* This does not require that <tt>fv.dictionary</tt> equal <tt>this.dictionary</tt>.
* @param fv A feature vector to add from. Its feature names must be Strings.
* @param prefix String to add when generating new feature names
*/
public void add (FeatureVector fv, String prefix)
{
Alphabet otherDict = fv.getAlphabet ();
for (int loc = 0; loc < fv.numLocations (); loc++) {
int idx = fv.indexAtLocation (loc);
String otherName = (String) otherDict.lookupObject (idx);
add (prefix+otherName, 1.0);
}
}
/**
* Adds all features from some other feature vector with weight 1.0.
* The names of the added features are generated by adding a prefix to
* their names in the original feature vector.
* This does not require that <tt>fv.dictionary</tt> equal <tt>this.dictionary</tt>.
* @param fv A feature vector to add from. Its feature names must be Strings.
* @param prefix String to add when generating new feature names
* @param binary true if <tt>fv</tt> is binary
*/
public void add (FeatureVector fv, String prefix, boolean binary)
{
if (binary)
add( fv, prefix);
else {
Alphabet otherDict = fv.getAlphabet ();
for (int loc = 0; loc < fv.numLocations (); loc++) {
int idx = fv.indexAtLocation (loc);
double val = fv.valueAtLocation (loc);
String otherName = (String) otherDict.lookupObject (idx);
add (prefix+otherName, val);
}
}
}
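	/* Example of the prefix convention above (the feature names are hypothetical):
	 * if fv contains a feature named "shape", then
	 *
	 *   afv.add (fv, "left/");
	 *
	 * adds a feature named "left/shape" (looked up in this vector's own alphabet)
	 * with fv's value for "shape", or 1.0 in the binary case.
	 */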
// Aims to be cheap, constant time when (indices != null)
public void add (int index, double value) {
if (values == null && value != 1.0)
throw new IllegalArgumentException ("Trying to add non-1.0 value ("+
dictionary.lookupObject(index)+"="+value+") to binary vector");
assert (index >= 0);
if (indices == null) {
if (index >= values.length) {
int newLength = index + 10; // ???
double[] newValues = new double[newLength]; // ???
System.arraycopy (values, 0, newValues, 0, values.length);
values = newValues;
values[index] = value;
assert (size <= index);
} else {
values[index] += value;
}
if (size <= index)
size = index+1;
} else {
if (size == indices.length) {
int newLength;
if (indices.length == 0)
newLength = 4;
else if (indices.length < 4)
newLength = indices.length * 2;
else if (indices.length < 100)
newLength = (indices.length * 3) / 2;
else
newLength = indices.length + 150;
if (values != null) {
double[] newValues = new double[newLength];
System.arraycopy (values, 0, newValues, 0, values.length);
values = newValues;
}
int[] newIndices = new int[newLength];
System.arraycopy (indices, 0, newIndices, 0, indices.length);
indices = newIndices;
}
//System.out.println ("indices.length="+indices.length+" size="+size);
indices[size] = index;
if (values != null)
values[size] = value;
size++;
}
}
public void add (Object key, double value)
{
//System.out.println ("AugmentableFeatureVector dictionary = "+dictionary+", size = "+dictionary.size());
int index = dictionary.lookupIndex (key);
//System.out.println ("AugmentableFeatureVector index("+key+") = "+index);
assert (index != -1);
add (index, value);
}
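	/* Incremental construction sketch (the alphabet and feature names are hypothetical):
	 *
	 *   Alphabet dict = new Alphabet ();
	 *   AugmentableFeatureVector afv = new AugmentableFeatureVector (dict, false);
	 *   afv.add ("width", 3.0);    // lookupIndex grows the alphabet as needed
	 *   afv.add ("height", 2.0);
	 *   FeatureVector fv = afv.toFeatureVector ();  // indices sorted, duplicate values summed
	 */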
public void add (int index) {
if (values != null)
throw new IllegalArgumentException ("Trying to add binary feature to real-valued vector");
assert (index >= 0);
add (index, 1.0);
}
public final int numLocations () {
if (indices == null)
//return values.length;
return size;
if (size-1 != maxSortedIndex)
sortIndices();
return size;
}
public final int location (int index) {
if (indices == null)
return index;
if (size-1 != maxSortedIndex)
sortIndices();
// Arrays.binarySearch (indices, index) doesn't work, because of the unused portion of the array at the end.
for (int i = 0; i < size; i++) {
if (indices[i] == index)
return i;
else if (indices[i] > index)
return -1;
}
return -1;
}
public final double valueAtLocation (int location) {
if (indices == null)
return values[location];
if (size-1 != maxSortedIndex)
sortIndices();
return super.valueAtLocation (location);
}
public final int indexAtLocation (int location) {
if (indices == null)
return location;
if (size-1 != maxSortedIndex)
sortIndices();
assert (location < size);
return super.indexAtLocation (location);
}
public final double value (int index) {
if (indices == null)
return values[index];
if (size-1 != maxSortedIndex)
sortIndices();
int loc = location(index);
if (loc >= 0) {
if (values == null)
return 1.0;
else
return values[loc];
} else
return 0;
}
public final void addTo (double[] accumulator, double scale)
{
if (indices != null && size-1 != maxSortedIndex)
sortIndices();
if (indices == null) {
for (int i = 0; i < size; i++)
accumulator[i] += values[i] * scale;
} else if (values == null) {
for (int i = 0; i < size; i++)
accumulator[indices[i]] += scale;
} else {
for (int i = 0; i < size; i++)
accumulator[indices[i]] += values[i] * scale;
}
}
public final void addTo (double[] accumulator) {
addTo (accumulator, 1.0);
}
public final void setValue (int index, double value) {
if (indices != null && size-1 != maxSortedIndex)
sortIndices();
assert (values != null);
if (indices == null) {
assert (index < size);
values[index] = value;
} else {
values[location(index)] = value;
}
}
public final void setValueAtLocation (int location, double value) {
assert (location < size);
values[location] = value;
}
public ConstantMatrix cloneMatrix () {
return new AugmentableFeatureVector ((Alphabet)dictionary,
indices, values, indices.length, size,
true, false, false);
}
public ConstantMatrix cloneMatrixZeroed () {
if (indices == null)
return new AugmentableFeatureVector (dictionary, new double[values.length]);
else {
int[] newIndices = new int[indices.length];
System.arraycopy (indices, 0, newIndices, 0, indices.length);
return new AugmentableFeatureVector (dictionary, newIndices, new double[values.length],
values.length, values.length,
false, false, false);
}
}
public int singleSize () {
return (indices == null
? values.length
: (size == 0
? 0
: indices[size-1]));
}
public SparseVector toSparseVector () {
if (size-1 != maxSortedIndex)
sortIndices();
//System.out.println ("AugmentableFeatureVector toSparseVector size="+size);
return new SparseVector (indices, values, size, size, true, false, false);
}
public FeatureVector toFeatureVector () {
if (indices != null && size-1 != maxSortedIndex)
sortIndices();
return new FeatureVector ((Alphabet)dictionary,
indices, values, size, size, true, false, false);
}
public double dotProduct (DenseVector v) {
if (indices != null && size-1 != maxSortedIndex)
sortIndices();
double ret = 0;
if (values == null)
for (int i = 0; i < size; i++)
ret += v.value(indices[i]);
else if (indices == null)
for (int i = 0; i < size; i++)
ret += values[i] * v.value(i);
else
for (int i = 0; i < size; i++)
ret += values[i] * v.value(indices[i]);
return ret;
}
public final double dotProduct (SparseVector v) {
if (v instanceof AugmentableFeatureVector)
return dotProduct((AugmentableFeatureVector)v);
if (indices != null && size-1 != maxSortedIndex)
sortIndices();
double ret = 0;
int vl = 0;
int vnl = v.numLocations ();
if (values == null) {
for (int i = 0; i < size; i++) {
while (vl < vnl && v.indexAtLocation(vl) < indices[i])
vl++;
if (vl < vnl && v.indexAtLocation(vl) == indices[i])
ret += v.valueAtLocation(vl);
}
} else if (indices == null) {
for (int i = 0; i < vnl; i++) {
int index = v.indexAtLocation(i);
if (index < size)
ret += v.valueAtLocation(i) * values[index];
}
} else {
for (int loc = 0; loc < size; loc++) {
while (vl < vnl && v.indexAtLocation(vl) < indices[loc])
vl++;
if (vl < vnl && v.indexAtLocation (vl) == indices [loc])
ret += values[loc] * v.value(indices[loc]);
}
}
return ret;
}
public final double dotProduct (AugmentableFeatureVector v) {
if (indices != null && size-1 != maxSortedIndex)
sortIndices();
if (v.indices != null && v.size-1 != v.maxSortedIndex)
v.sortIndices();
double ret = 0;
int vl = 0;
int vnl = v.size;
if (values == null) {
if (v.values == null) {
for (int i = 0; i < size; i++) {
while (vl < vnl && v.indices[vl] < indices[i])
vl++;
if (vl < vnl && v.indices[vl] == indices[i])
ret += 1.0;
}
} else {
for (int i = 0; i < size; i++) {
while (vl < vnl && v.indices[vl] < indices[i])
vl++;
if (vl < vnl && v.indices[vl] == indices[i])
ret += v.values[vl];
}
}
} else if (indices == null) {
for (int i = 0; i < vnl; i++) {
int index = v.indexAtLocation(i);
if (index < size)
ret += v.valueAtLocation(i) * values[index];
}
} else {
if (v.values == null) {
for (int i = 0; i < size; i++) {
while (vl < vnl && v.indices[vl] < indices[i])
vl++;
if (vl < vnl && v.indices[vl] == indices[i])
ret += values[i];
}
} else {
for (int i = 0; i < size; i++) {
while (vl < vnl && v.indices[vl] < indices[i])
vl++;
if (vl < vnl && v.indices[vl] == indices[i])
ret += values[i] * v.values[vl];
}
}
}
return ret;
}
public void plusEquals (AugmentableFeatureVector v, double factor) {
if (indices != null && size-1 != maxSortedIndex)
sortIndices();
if (v.indices != null && v.size-1 != v.maxSortedIndex)
v.sortIndices();
int vl = 0;
int vnl = v.size;
assert (values != null);
if (indices == null) {
if (v.indices == null) {
vnl = Math.min (vnl, size);
for (int i = 0; i < vnl; i++)
values[i] += v.values[i];
} else {
// v.indices != null
for (int i = 0; i < vnl; i++) {
int index = v.indices[i];
if (index < values.length) {
values[index] += v.values[i] * factor;
if (index >= size)
size = index+1;
}
}
}
} else {
// indices != null
if (v.indices == null) {
for (int i = 0; i < size; i++) {
if (indices[i] < vnl)
values[i] += v.values[indices[i]];
// xxx We should check to see if there were more
// higher indices in "v" that didn't get added!
}
} else {
// v.indices != null
if (v.values == null) {
// v.indices != null && v.values == null
for (int i = 0; i < size; i++) {
while (vl < vnl && v.indices[vl] < indices[i])
vl++;
if (vl < vnl && v.indices[vl] == indices[i])
values[i] += factor;
// xxx We should check to see if there were more
// higher indices in "v" that didn't get added!
}
} else {
// v.indices != null && v.values != null
for (int i = 0; i < size; i++) {
while (vl < vnl && v.indices[vl] < indices[i])
vl++;
if (vl < vnl && v.indices[vl] == indices[i])
values[i] += v.values[vl] * factor;
// xxx We should check to see if there were more
// higher indices in "v" that didn't get added!
}
}
}
}
}
// But only adds to those entries that have "locations" (i.e. are already non-zero)
public void plusEquals (SparseVector v, double factor) {
if (v instanceof AugmentableFeatureVector) {
plusEquals ((AugmentableFeatureVector)v, factor);
return;
}
//assert (false) : v.getClass().getName(); // This code needs to be checked!
if (indices != null && size-1 != maxSortedIndex)
sortIndices();
int vl = 0;
assert (values != null);
if (indices == null) {
if (v.indices == null) {
// indices == null && v.indices == null (&& v.values != null)
int s = Math.min (size, v.values.length);
for (int i = 0; i < s; i++)
values[i] += v.values[i] * factor;
// xxx We aren't adding in values with indices higher than "this.size"!
} else {
// indices == null && v.indices != null
if (v.values == null) {
// indices == null && v.indices != null && v.values == null
for (int i = 0; i < v.indices.length; i++) {
int index = v.indices[i];
if (index < size)
values[index] += factor;
}
// xxx We aren't adding in values with indices higher than "size"!
} else {
// indices == null && v.indices != null && v.values != null
for (int i = 0; i < v.indices.length; i++) {
int index = v.indices[i];
if (index < size)
values[index] += v.values[i] * factor;
// xxx We aren't adding in values with indices higher than "size"!
}
}
}
} else {
// indices != null
if (v.indices == null) {
// indices != null && v.indices == null (&& v.values != null)
for (int i = 0; i < size; i++)
if (indices[i] < v.values.length)
values[i] += v.values[indices[i]] * factor;
// xxx We aren't adding in values with indices higher than "size"!
} else {
// indices != null && v.indices != null
int vnl = v.indices.length;
if (v.values == null) {
// indices != null && v.indices != null && v.values == null
for (int i = 0; i < size; i++) {
while (vl < vnl && v.indices[vl] < indices[i])
vl++;
if (vl < vnl && v.indices[vl] == indices[i])
values[i] += v.values[vl] * factor;
// xxx We should check to see if there were more
// higher indices in "v" that didn't get added!
}
} else {
// indices != null && v.indices != null && v.values != null
for (int i = 0; i < size; i++) {
while (vl < vnl && v.indices[vl] < indices[i])
vl++;
if (vl < vnl && v.indices[vl] == indices[i])
values[i] += v.values[vl] * factor;
// xxx We should check to see if there were more
// higher indices in "v" that didn't get added!
}
}
}
}
}
public void plusEquals (SparseVector v) {
plusEquals (v, 1.0);
}
public void setAll (double v)
{
assert (values != null);
for (int i = 0; i < values.length; i++)
values[i] = v;
}
public double oneNorm () {
if (size-1 != maxSortedIndex)
sortIndices();
double ret = 0;
if (values == null)
return size;
for (int i = 0; i < size; i++)
ret += values[i];
return ret;
}
public double twoNorm () {
if (size-1 != maxSortedIndex)
sortIndices();
double ret = 0;
if (values == null)
return Math.sqrt (size);
for (int i = 0; i < size; i++)
ret += values[i] * values[i];
return Math.sqrt (ret);
}
public double infinityNorm () {
if (size-1 != maxSortedIndex)
sortIndices();
if (values == null)
return 1.0;
double max = Double.NEGATIVE_INFINITY;
for (int i = 0; i < size; i++)
if (Math.abs(values[i]) > max)
max = Math.abs(values[i]);
return max;
}
public void print() {
//System.out.println ("ASV size="+size+" dict.size="+dictionary.size()+" values.length="+values.length+" indices.length="+indices.length);
if (size-1 != maxSortedIndex)
sortIndices();
super.print();
}
protected void sortIndices ()
{
if (indices == null) // vector is dense, so indices are already sorted
return;
else if (this.size == 0) { // assume method called from constructor; initialize member vars
this.size = indices.length;
this.maxSortedIndex = -1;
}
// Just BubbleSort; this is efficient when already mostly sorted.
		// Note that we BubbleSort from the end forward; this is most efficient
// when we have added a few additional items to the end of a previously sorted list.
// Note that we remember the highest index that was already sorted as "maxSortedIndex".
// Note that maxSortedIndex may be -1 here, so the first time through the outer loop
// just falls through immediately when the termination condition of the inner loop is met.
for (int i = maxSortedIndex+1; i < size; i++) {
for (int j = i; j > 0; j--) {
if (indices[j] < indices[j-1]) {
// Swap both indices and values
int f;
f = indices[j];
indices[j] = indices[j-1];
indices[j-1] = f;
if (values != null) {
double v;
v = values[j];
values[j] = values[j-1];
values[j-1] = v;
}
}
}
}
removeDuplicates (0);
maxSortedIndex = size-1;
}
	// An argument of zero is a special value meaning that this function should count the duplicates itself;
	// otherwise it assumes they have been counted elsewhere, and that numDuplicates
	// is how many that count yielded.
// Note that this method relies on the indices being sorted first
protected void removeDuplicates (int numDuplicates)
{
if (indices == null)
return;
//System.out.print ("AFV removeDuplicates ");
//for (int i = 0; i < size; i++)
//System.out.print (" " + dictionary.lookupObject(indices[i]) + "=" + indices[i]);
//System.out.println (" numDuplicates="+numDuplicates);
if (numDuplicates == 0)
for (int i = 1; i < size; i++)
if (indices[i-1] == indices[i])
numDuplicates++;
if (numDuplicates == 0)
return;
assert (indices.length - numDuplicates > 0)
: "size="+size+" indices.length="+indices.length+" numDuplicates="+numDuplicates;
int[] newIndices = new int[size - numDuplicates];
double[] newValues = values == null ? null : new double[size - numDuplicates];
newIndices[0] = indices[0];
assert (indices.length >= size);
for (int i = 0, j = 0; i < size-1; i++) {
if (indices[i] == indices[i+1]) {
if (values != null)
newValues[j] += values[i];
} else {
newIndices[j] = indices[i];
if (values != null)
newValues[j] += values[i];
j++;
}
if(i == size-2) {
if(values != null)
newValues[j] += values[i+1];
newIndices[j] = indices[i+1];
}
}
this.indices = newIndices;
this.values = newValues;
this.size -= numDuplicates;
this.maxSortedIndex = size - 1;
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeInt (size);
out.writeInt (maxSortedIndex);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
size = in.readInt();
maxSortedIndex = in.readInt();
}
}
| 23,210 | 29.78382 | 140 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/ArrayListSequence.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.ArrayList;
import cc.mallet.types.Sequence;
public class ArrayListSequence<E> extends ArrayList<E> implements Sequence<E>
{
private static final long serialVersionUID = 1;
}
| 743 | 28.76 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/GradientGain.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
The difference between constraint and expectation for each feature on the correct class.
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.io.*;
import cc.mallet.classify.Classification;
public class GradientGain extends RankedFeatureVector
{
// GradientGain of a feature, f, is defined in terms of MaxEnt-type feature+class "Feature"s, F,
// F = f,c
	// GradientGain of a Feature, F, is
	// G(F) = G(f,c) = abs(E~[F] - E.[F])
	// where E~[] is the empirical distribution, according to the true class label distribution
	// and E.[] is the distribution from the (imperfect) classifier
	// GradientGain of a feature, f, is
	// G(f) = sum_c G(f,c)
private static double[] calcGradientGains (InstanceList ilist, LabelVector[] classifications)
{
int numInstances = ilist.size();
int numClasses = ilist.getTargetAlphabet().size();
int numFeatures = ilist.getDataAlphabet().size();
double[] gradientgains = new double[numFeatures];
double flv; // feature location value
int fli; // feature location index
// Populate targetFeatureCount, et al
for (int i = 0; i < ilist.size(); i++) {
assert (classifications[i].getLabelAlphabet() == ilist.getTargetAlphabet());
Instance inst = ilist.get(i);
Labeling labeling = inst.getLabeling ();
FeatureVector fv = (FeatureVector) inst.getData ();
double instanceWeight = ilist.getInstanceWeight(i);
// The code below relies on labelWeights summing to 1 over all labels!
double labelWeightSum = 0;
for (int ll = 0; ll < labeling.numLocations(); ll++) {
int li = labeling.indexAtLocation (ll);
double labelWeight = labeling.value (li);
labelWeightSum += labelWeight;
double labelWeightDiff = Math.abs(labelWeight - classifications[i].value(li));
for (int fl = 0; fl < fv.numLocations(); fl++) {
fli = fv.indexAtLocation(fl);
gradientgains[fli] += fv.valueAtLocation(fl) * labelWeightDiff * instanceWeight;
}
}
assert (Math.abs (labelWeightSum - 1.0) < 0.0001);
}
return gradientgains;
}
public GradientGain (InstanceList ilist, LabelVector[] classifications)
{
super (ilist.getDataAlphabet(), calcGradientGains (ilist, classifications));
}
private static LabelVector[] getLabelVectorsFromClassifications (Classification[] c)
{
LabelVector[] ret = new LabelVector[c.length];
for (int i = 0; i < c.length; i++)
ret[i] = c[i].getLabelVector();
return ret;
}
public GradientGain (InstanceList ilist, Classification[] classifications)
{
super (ilist.getDataAlphabet(),
calcGradientGains (ilist, getLabelVectorsFromClassifications(classifications)));
}
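	/* The i-th classification is expected to be the classifier's posterior label
	 * distribution for ilist.get(i), over the same target LabelAlphabet (see the
	 * assert in calcGradientGains).  A hedged sketch:
	 *
	 *   Classification[] results = ...;  // one Classification per instance in ilist
	 *   GradientGain gg = new GradientGain (ilist, results);
	 */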
public static class Factory implements RankedFeatureVector.Factory
{
LabelVector[] classifications;
public Factory (LabelVector[] classifications)
{
this.classifications = classifications;
}
public RankedFeatureVector newRankedFeatureVector (InstanceList ilist)
{
return new GradientGain (ilist, classifications);
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeInt(classifications.length);
for (int i = 0; i < classifications.length; i++)
out.writeObject(classifications[i]);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
int n = in.readInt();
this.classifications = new LabelVector[n];
for (int i = 0; i < n; i++)
this.classifications[i] = (LabelVector)in.readObject();
}
}
}
| 4,141 | 33.231405 | 97 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/LabelsSequence.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types;
import java.io.Serializable;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
/**
* A simple {@link Sequence} implementation where all of the
* elements must be Labels. Provides a convenient type-safe accessor {@link #getLabels}.
* Instances of LabelsSequence are immutable.
*
* @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
public class LabelsSequence implements Sequence, AlphabetCarrying, Serializable
{
Labels[] seq;
/**
* Create a LabelsSequence from an array. The array is shallow-copied.
*/
public LabelsSequence (Labels[] seq)
{
for (int i = 0; i < seq.length-1; i++)
if (!Alphabet.alphabetsMatch(seq[i], seq[i+1]))
throw new IllegalArgumentException ("Alphabets do not match");
this.seq = new Labels[seq.length];
System.arraycopy (seq, 0, this.seq, 0, seq.length);
}
public LabelsSequence (LabelSequence seq)
{
this.seq = new Labels[seq.size()];
for (int i = 0; i < seq.length; i++) {
this.seq[i] = new Labels (new Label[] { seq.getLabelAtPosition (i) });
}
}
public Alphabet getAlphabet () { return seq[0].getAlphabet(); }
public Alphabet[] getAlphabets () { return seq[0].getAlphabets(); }
public int size () { return seq.length; }
public Object get (int i) { return seq[i]; }
public Labels getLabels (int i) { return seq[i]; }
public String toString ()
{
String ret = "LabelsSequence:\n";
for (int i = 0; i < seq.length; i++) {
ret += i+": ";
ret += seq[i].toString();
ret += "\n";
}
return ret;
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
out.defaultWriteObject ();
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
in.defaultReadObject ();
}
}
| 2,491 | 27.643678 | 93 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/SequencePairAlignment.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
public class SequencePairAlignment<I,O> extends SequencePair<I,O>
{
protected double weight;
public SequencePairAlignment (Sequence<I> input, Sequence<O> output, double weight)
{
super (input, output);
this.weight = weight;
}
protected SequencePairAlignment ()
{
}
public double getWeight()
{
return weight;
}
}
| 886 | 22.972973 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/Token.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.Iterator;
import java.util.ArrayList;
import java.util.HashMap;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import cc.mallet.util.PropertyList;
/** A representation of a piece of text, usually a single word, to
which we can attach properties. */
public class Token implements Serializable, PropertyHolder {
private String text;
PropertyList properties = null; // for arbitrary properties
PropertyList features = null; // numeric, to turn into a FeatureVector
public Token (String s) {
text = s;
}
public String getText () {
return text;
}
public void setText (String t) {
text = t;
}
// xxx This implementation may change in the future!
// If you really just want the text, you should use Token.getText() instead.
public String toString () {
StringBuffer sb = new StringBuffer ();
sb.append (getText());
if (features != null) {
PropertyList.Iterator iter = features.iterator();
while (iter.hasNext()) {
iter.next();
sb.append (" feature(" + iter.getKey() + ")=" +iter.getNumericValue());
}
}
if (properties != null) {
PropertyList.Iterator iter = properties.iterator();
while (iter.hasNext()) {
iter.next();
if (iter.isNumeric())
sb.append (" property(" + iter.getKey() + ")=" +iter.getNumericValue());
else
sb.append (" property(" + iter.getKey() + ")=" +iter.getObjectValue());
}
}
return sb.toString();
}
public String toStringWithFeatureNames () {
StringBuffer sb = new StringBuffer ();
sb.append (getText());
if (features != null) {
PropertyList.Iterator iter = features.iterator();
while (iter.hasNext()) {
iter.next();
sb.append (" " + iter.getKey());
}
}
return sb.toString();
}
public FeatureVector toFeatureVector (Alphabet dict, boolean binary) {
return new FeatureVector (dict, features, binary);
}
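	/* Sketch of typical use (the feature names are hypothetical):
	 *
	 *   Token token = new Token ("dog");
	 *   token.setFeatureValue ("SUFFIX=og", 1.0);
	 *   token.setFeatureValue ("INITCAP", 0.0);
	 *   FeatureVector fv = token.toFeatureVector (dict, false);  // dict is an existing Alphabet
	 */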
public void setProperty (String key, Object value) {
properties = PropertyList.add (key, value, properties);
}
public void setNumericProperty (String key, double value) {
properties = PropertyList.add (key, value, properties);
}
public PropertyList getProperties () {
return properties;
}
public void setProperties (PropertyList newProperties) {
properties = newProperties;
}
public Object getProperty (String key) {
return properties == null ? null : properties.lookupObject (key);
}
public double getNumericProperty (String key) {
return (properties == null ? 0.0 : properties.lookupNumber (key));
}
public boolean hasProperty (String key) {
return (properties != null && properties.hasProperty( key ));
}
public void setFeatureValue (String key, double value) {
features = PropertyList.add (key, value, features);
}
public double getFeatureValue (String key) {
return (features == null ? 0.0 : features.lookupNumber (key));
}
public PropertyList getFeatures () {
return features;
}
public void setFeatures (PropertyList pl) {
features = pl;
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
out.defaultWriteObject ();
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
in.defaultReadObject ();
}
}
| 4,024 | 25.655629 | 92 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/AlphabetFactory.java | package cc.mallet.types;
import java.io.*;
public class AlphabetFactory {
/** Create a dummy alphabet with <code>n</code> dimensions */
public static Alphabet alphabetOfSize (int n) {
Alphabet alphabet = new Alphabet();
for (int i = 0; i < n; i++) {
alphabet.lookupIndex("d" + i);
}
return alphabet;
}
/** Create a dummy label alphabet with <code>n</code> dimensions */
public static LabelAlphabet labelAlphabetOfSize (int n) {
LabelAlphabet alphabet = new LabelAlphabet();
for (int i = 0; i < n; i++) {
alphabet.lookupIndex("d" + i);
}
return alphabet;
}
/** Load an alphabet from a file, one item per line */
public static Alphabet loadFromFile(File alphabetFile) throws IOException {
BufferedReader reader = new BufferedReader(new FileReader(alphabetFile));
Alphabet alphabet = new Alphabet();
String item;
while ((item = reader.readLine()) != null) {
alphabet.lookupIndex(item);
}
reader.close();
return alphabet;
}
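	/* Usage sketch (the file name is hypothetical); the file holds one entry per line:
	 *
	 *   the
	 *   quick
	 *   fox
	 *
	 *   Alphabet dict = AlphabetFactory.loadFromFile (new File ("alphabet.txt"));
	 */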
} | 981 | 23.55 | 76 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/NormalizedDotProductMetric.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/** A measure of distance between two <CODE>SparseVector</CODE>s
@author Aron Culotta <A HREF="mailto:[email protected]">[email protected]</A>
*/
package cc.mallet.types;
import java.util.HashMap;
import cc.mallet.types.SparseVector;
/**
Computes
1 - [<x,y> / sqrt (<x,x>*<y,y>)]
aka 1 - cosine similarity
*/
public class NormalizedDotProductMetric implements CachedMetric {
HashMap hash; // stores the self dot-products used for normalization
public NormalizedDotProductMetric () {
this.hash = new HashMap ();
}
public double distance (SparseVector a, SparseVector b) {
// double ret = a.dotProduct (b) /
// Math.sqrt (a.dotProduct (a) * b.dotProduct (b));
// gmann : twoNorm() more efficient than a.dotProduct(a)
double ret = a.dotProduct(b) / (a.twoNorm()*b.twoNorm());
return 1.0 - ret;
}
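	/* Usage sketch (v1 and v2 are hypothetical SparseVectors): vectors pointing in the
	 * same direction give a distance of 0, orthogonal vectors give 1.
	 *
	 *   NormalizedDotProductMetric cosineDistance = new NormalizedDotProductMetric ();
	 *   double d = cosineDistance.distance (v1, v2);
	 */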
public double distance( SparseVector a, int hashCodeA,
SparseVector b, int hashCodeB) {
Double cachedA = (Double) hash.get (new Integer (hashCodeA));
Double cachedB = (Double) hash.get (new Integer (hashCodeB));
if (a == null || b == null)
return 1.0;
if (cachedA == null) {
cachedA = new Double (a.dotProduct (a));
hash.put (new Integer (hashCodeA), cachedA);
}
if (cachedB == null) {
cachedB = new Double (b.dotProduct (b));
hash.put (new Integer (hashCodeB), cachedB);
}
		double ret = a.dotProduct (b) / Math.sqrt (cachedA.doubleValue() * cachedB.doubleValue());
		return 1.0 - ret;
}
}
| 2,075 | 30.938462 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/InvertedIndex.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
Simple, in-memory inverted index that stores a list of instances having each feature, but not
a value associated with each. Currently only works with FeatureVectors.
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.util.*;
public class InvertedIndex
{
InstanceList ilist;
ArrayList[] ii;
public InvertedIndex (InstanceList ilist)
{
// bug fix from Georgios Paltoglou ([email protected])
this.ilist = ilist;
int numFeatures = ilist.getDataAlphabet().size();
ii = new ArrayList[numFeatures];
for (int i = 0; i < ilist.size(); i++) {
Instance inst = ilist.get(i);
if (!(inst.getData() instanceof FeatureVector))
throw new IllegalArgumentException (this.getClass().getName() +
" currently only handles FeatureVector data");
FeatureVector fv = (FeatureVector) inst.getData ();
for (int fl = 0; fl < fv.numLocations(); fl++) {
if (fv.valueAtLocation(fl) != 0)
addEntry (fv.indexAtLocation(fl), inst);
}
}
}
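	/* Lookup sketch (the feature name "winning" is hypothetical):
	 *
	 *   InvertedIndex index = new InvertedIndex (ilist);
	 *   ArrayList postings = index.getInstancesWithFeature ("winning");
	 *   int df = index.getCountWithFeature ("winning");  // number of instances containing it
	 */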
private void addEntry (int featureIndex, Instance instance)
{
if (ii[featureIndex] == null)
ii[featureIndex] = new ArrayList(2);
ii[featureIndex].add (instance);
}
public InstanceList getInstanceList () { return ilist; }
public ArrayList getInstancesWithFeature (int featureIndex)
{
return ii[featureIndex];
}
public ArrayList getInstancesWithFeature (Object feature)
{
int index = ilist.getDataAlphabet().lookupIndex (feature, false);
if (index == -1)
throw new IllegalArgumentException ("Feature "+feature+" not contained in InvertedIndex");
return getInstancesWithFeature (index);
}
public int getCountWithFeature (int featureIndex)
{
ArrayList a = ii[featureIndex];
return a == null ? 0 : a.size();
}
public int getCountWithFeature (Object feature)
{
int index = ilist.getDataAlphabet().lookupIndex (feature, false);
if (index == -1)
throw new IllegalArgumentException ("Feature "+feature+" not contained in InvertedIndex");
ArrayList a = ii[index];
return a == null ? 0 : a.size();
}
}
| 2,540 | 28.894118 | 95 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/GainRatio.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types;
import java.awt.geom.Point2D;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.logging.Logger;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.Maths;
/**
* List of features along with their thresholds sorted in descending order of
* the ratio of (1) information gained by splitting instances on the
* feature at its associated threshold value, to (2) the split information.<p>
*
* The calculations performed do not take into consideration the instance weights.<p>
*
* To create an instance of GainRatio from an InstanceList, one must do the following:<p><tt>
*
* InstanceList ilist = ...
* ...
* GainRatio gr = GainRatio.createGainRatio(ilist);
* </tt><p>
*
* J. R. Quinlan
* "Improved Use of Continuous Attributes in C4.5"
* ftp://ftp.cs.cmu.edu/project/jair/volume4/quinlan96a.ps
*
* @author Gary Huang <a href="mailto:[email protected]">[email protected]</a>
*/
public class GainRatio extends RankedFeatureVector
{
private static final Logger logger = MalletLogger.getLogger (GainRatio.class.getName ());
private static final long serialVersionUID = 1L;
public static final double log2 = Math.log(2);
double[] m_splitPoints;
double m_baseEntropy;
LabelVector m_baseLabelDistribution;
int m_numSplitPointsForBestFeature;
int m_minNumInsts;
/**
* Calculates gain ratios for all (feature, split point) pairs
	 * and returns an array of:<pre>
* 1. gain ratios (each element is the max gain ratio of a feature
* for those split points with at least average gain)
* 2. the optimal split point for each feature
* 3. the overall entropy
* 4. the overall label distribution of the given instances
* 5. the number of split points of the split feature.
* </pre>
*/
protected static Object[] calcGainRatios(InstanceList ilist, int[] instIndices, int minNumInsts)
{
int numInsts = instIndices.length;
Alphabet dataDict = ilist.getDataAlphabet();
LabelAlphabet targetDict = (LabelAlphabet) ilist.getTargetAlphabet();
double[] targetCounts = new double[targetDict.size()];
// Accumulate target label counts and make sure
// the sum of each instance's target label is 1
for (int ii = 0; ii < numInsts; ii++) {
Instance inst = ilist.get(instIndices[ii]);
Labeling labeling = inst.getLabeling();
double labelWeightSum = 0;
for (int ll = 0; ll < labeling.numLocations(); ll++) {
int li = labeling.indexAtLocation(ll);
double labelWeight = labeling.valueAtLocation(ll);
labelWeightSum += labelWeight;
targetCounts[li] += labelWeight;
}
assert(Maths.almostEquals(labelWeightSum, 1));
}
// Calculate the base entropy Info(D) and the the
// label distribution of the given instances
double[] targetDistribution = new double[targetDict.size()];
double baseEntropy = 0;
for (int ci = 0; ci < targetDict.size(); ci++) {
double p = targetCounts[ci] / numInsts;
targetDistribution[ci] = p;
if (p > 0)
baseEntropy -= p * Math.log(p) / log2;
}
LabelVector baseLabelDistribution = new LabelVector(targetDict, targetDistribution);
double infoGainSum = 0;
int totalNumSplitPoints = 0;
double[] passTestTargetCounts = new double[targetDict.size()];
// Maps feature index -> Hashtable, and each table
// maps (split point) -> (info gain, split ratio)
Hashtable[] featureToInfo = new Hashtable[dataDict.size()];
// Go through each feature's split points in ascending order
for (int fi = 0; fi < dataDict.size(); fi++) {
if ((fi+1) % 1000 == 0)
logger.info("at feature " + (fi+1) + " / " + dataDict.size());
featureToInfo[fi] = new Hashtable();
Arrays.fill(passTestTargetCounts, 0);
// Sort instances on this feature's values
instIndices = sortInstances(ilist, instIndices, fi);
// Iterate through the sorted instances
for (int ii = 0; ii < numInsts-1; ii++) {
Instance inst = ilist.get(instIndices[ii]);
Instance instPlusOne = ilist.get(instIndices[ii+1]);
FeatureVector fv1 = (FeatureVector) inst.getData();
FeatureVector fv2 = (FeatureVector) instPlusOne.getData();
double lower = fv1.value(fi);
double higher = fv2.value(fi);
// Accumulate the label weights for instances passing the test
Labeling labeling = inst.getLabeling();
for (int ll = 0; ll < labeling.numLocations(); ll++) {
int li = labeling.indexAtLocation(ll);
double labelWeight = labeling.valueAtLocation(ll);
passTestTargetCounts[li] += labelWeight;
}
if (Maths.almostEquals(lower, higher)
||
inst.getLabeling().toString().equals(instPlusOne.getLabeling().toString()))
continue;
// For this (feature, spilt point) pair, calculate the
// info gain of using this pair to split insts into those
// with value of feature <= p versus > p
totalNumSplitPoints++;
double splitPoint = (lower + higher) / 2;
double numPassInsts = ii+1;
// If this split point creates a partition
// with too few instances, ignore it
double numFailInsts = numInsts - numPassInsts;
if (numPassInsts < minNumInsts || numFailInsts < minNumInsts)
continue;
// If all instances pass or fail this test, it is useless
double passProportion = numPassInsts / numInsts;
if (Maths.almostEquals(passProportion, 0) || Maths.almostEquals(passProportion, 1))
continue;
// Calculate the entropy of instances passing and failing the test
double passEntropy = 0;
double failEntropy = 0;
double p;
for (int ci = 0; ci < targetDict.size(); ci++) {
if (numPassInsts > 0) {
p = passTestTargetCounts[ci] / numPassInsts;
if (p > 0)
passEntropy -= p * Math.log(p) / log2;
}
if (numFailInsts > 0) {
double failTestTargetCount = targetCounts[ci] - passTestTargetCounts[ci];
p = failTestTargetCount / numFailInsts;
if (p > 0)
failEntropy -= p * Math.log(p) / log2;
}
}
// Calculate Gain(D, T), the information gained
// by testing on this (feature, split-point) pair
double gainDT = baseEntropy
- passProportion * passEntropy
- (1-passProportion) * failEntropy;
infoGainSum += gainDT;
// Calculate Split(D, T), the split information
double splitDT =
- passProportion * Math.log(passProportion) / log2
- (1-passProportion) * Math.log(1-passProportion) / log2;
// Calculate the gain ratio
double gainRatio = gainDT / splitDT;
featureToInfo[fi].put(new Double(splitPoint),
new Point2D.Double(gainDT, gainRatio));
} // End loop through sorted instances
} // End loop through features
// For each feature's split point with at least average gain,
// get the maximum gain ratio and the associated split point
// (using the info gain as tie breaker)
double[] gainRatios = new double[dataDict.size()];
double[] splitPoints = new double[dataDict.size()];
int numSplitsForBestFeature = 0;
// If all feature vectors are identical or no splits are worthy, return all 0s
if (totalNumSplitPoints == 0 || Maths.almostEquals(infoGainSum, 0))
return new Object[] {gainRatios, splitPoints, new Double(baseEntropy),
baseLabelDistribution, new Integer(numSplitsForBestFeature)};
double avgInfoGain = infoGainSum / totalNumSplitPoints;
double maxGainRatio = 0;
double gainForMaxGainRatio = 0; // tie breaker
int xxx = 0;
for (int fi = 0; fi < dataDict.size(); fi++) {
double featureMaxGainRatio = 0;
double featureGainForMaxGainRatio = 0;
double bestSplitPoint = Double.NaN;
for (Iterator iter = featureToInfo[fi].keySet().iterator(); iter.hasNext(); ) {
Object key = iter.next();
Point2D.Double pt = (Point2D.Double) featureToInfo[fi].get(key);
double splitPoint = ((Double) key).doubleValue();
double infoGain = pt.getX();
double gainRatio = pt.getY();
if (infoGain >= avgInfoGain) {
if (gainRatio > featureMaxGainRatio
||
(gainRatio == featureMaxGainRatio && infoGain > featureGainForMaxGainRatio)) {
featureMaxGainRatio = gainRatio;
featureGainForMaxGainRatio = infoGain;
bestSplitPoint = splitPoint;
}
}
else
xxx++;
}
			assert(!Double.isNaN(bestSplitPoint));
gainRatios[fi] = featureMaxGainRatio;
splitPoints[fi] = bestSplitPoint;
if (featureMaxGainRatio > maxGainRatio
||
(featureMaxGainRatio == maxGainRatio && featureGainForMaxGainRatio > gainForMaxGainRatio)) {
maxGainRatio = featureMaxGainRatio;
gainForMaxGainRatio = featureGainForMaxGainRatio;
numSplitsForBestFeature = featureToInfo[fi].size();
}
}
logger.info("label distrib:\n" + baseLabelDistribution);
logger.info("base entropy=" + baseEntropy + ", info gain sum=" + infoGainSum + ", total num split points=" + totalNumSplitPoints + ", avg info gain=" + avgInfoGain + ", num splits with < avg gain=" + xxx);
return new Object[] {gainRatios, splitPoints, new Double(baseEntropy),
baseLabelDistribution, new Integer(numSplitsForBestFeature)};
}
public static int[] sortInstances(InstanceList ilist, int[] instIndices, int featureIndex)
{
ArrayList list = new ArrayList();
for (int ii = 0; ii < instIndices.length; ii++) {
Instance inst = ilist.get(instIndices[ii]);
FeatureVector fv = (FeatureVector) inst.getData();
list.add(new Point2D.Double(instIndices[ii], fv.value(featureIndex)));
}
Collections.sort(list, new Comparator()
{
public int compare(Object o1, Object o2)
{
Point2D.Double p1 = (Point2D.Double) o1;
Point2D.Double p2 = (Point2D.Double) o2;
if (p1.y == p2.y) {
assert(p1.x != p2.x);
return p1.x > p2.x ? 1 : -1;
}
else
return p1.y > p2.y ? 1 : -1;
}
});
int[] sorted = new int[instIndices.length];
for (int i = 0; i < list.size(); i++)
sorted[i] = (int) ((Point2D.Double) list.get(i)).getX();
return sorted;
}
/**
* Constructs a GainRatio object.
*/
public static GainRatio createGainRatio(InstanceList ilist)
{
int[] instIndices = new int[ilist.size()];
for (int ii = 0; ii < instIndices.length; ii++)
instIndices[ii] = ii;
return createGainRatio(ilist, instIndices, 2);
}
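	/* Reading back the best split (a sketch; ilist is the InstanceList used above):
	 *
	 *   GainRatio gr = GainRatio.createGainRatio (ilist);
	 *   int bestFeature = gr.getIndexAtRank (0);        // inherited from RankedFeatureVector
	 *   double threshold = gr.getMaxValuedThreshold (); // split point paired with that feature
	 */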
/**
* Constructs a GainRatio object
*/
public static GainRatio createGainRatio(InstanceList ilist, int[] instIndices,
int minNumInsts)
{
Object[] objs = calcGainRatios(ilist, instIndices, minNumInsts);
double[] gainRatios = (double[]) objs[0];
double[] splitPoints = (double[]) objs[1];
double baseEntropy = ((Double) objs[2]).doubleValue();
LabelVector baseLabelDistribution = (LabelVector) objs[3];
int numSplitPointsForBestFeature = ((Integer) objs[4]).intValue();
return new GainRatio(ilist.getDataAlphabet(), gainRatios, splitPoints,
baseEntropy, baseLabelDistribution,
numSplitPointsForBestFeature, minNumInsts);
}
protected GainRatio(Alphabet dataAlphabet, double[] gainRatios, double[] splitPoints,
double baseEntropy, LabelVector baseLabelDistribution,
int numSplitPointsForBestFeature, int minNumInsts)
{
super (dataAlphabet, gainRatios);
m_splitPoints = splitPoints;
m_baseEntropy = baseEntropy;
m_baseLabelDistribution = baseLabelDistribution;
m_numSplitPointsForBestFeature = numSplitPointsForBestFeature;
m_minNumInsts = minNumInsts;
}
/**
* @return the threshold of the (feature, threshold)
* pair with with maximum gain ratio
*/
public double getMaxValuedThreshold()
{
return getThresholdAtRank(0);
}
/**
* @return the threshold of the (feature, threshold)
* pair with the given rank
*/
public double getThresholdAtRank(int rank)
{
int index = getIndexAtRank(rank);
return m_splitPoints[index];
}
public double getBaseEntropy ()
{
return m_baseEntropy;
}
public LabelVector getBaseLabelDistribution ()
{
return m_baseLabelDistribution;
}
public int getNumSplitPointsForBestFeature()
{
return m_numSplitPointsForBestFeature;
}
}
| 12,552 | 33.111413 | 207 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/InstanceListTUI.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
A command-line interface for creating an InstanceList.
I would have put this in InstanceList itself, but it doesn't seem that an inner class
can have its own main()???
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types;
import java.io.*;
import java.util.*;
//import bsh.Interpreter;
import java.util.regex.*;
import cc.mallet.pipe.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.types.*;
import cc.mallet.util.*;
public class InstanceListTUI
{
static CommandOption.String prefixCodeOption = new CommandOption.String
(InstanceList.class, "prefix-code", "Java code", true, null,
"Java code you want run before any other interpreted code. Note that the text "+
"is interpretted without modification, so unlike some other options, "+
"you need to include any necessary 'new's.", null);
static CommandOption.SpacedStrings pipeInputOption = new CommandOption.SpacedStrings
(InstanceList.class, "pipe-input", "STRING...", true, null,
"The String or String[] that will be passed into the Pipe, "+
"(or the PipeInputIterator, if specified. If --pipe-input-iterator is specified, "+
"this option is not used.", null);
static final String defaultPipeIterator =
"FileIterator(pipeInput,FileIterator.STARTING_DIRECTORIES)";
static CommandOption.String pipeInputIteratorOption = new CommandOption.String
(InstanceList.class, "pipe-input-iterator", "PipeInputIterator constructor", true, defaultPipeIterator,
"A constructor for a PipeInputIterator, omitting the 'new', and substiting 'pipeInput' with the "+
"String or String[] that comes from the --pipe-input option.",
"By default this value is null, indicating that no iterator is to be run, and simply "+
"the single --pipe-input argument should be put directly into the pipe.");
static final String defaultPipe =
"new Input2CharSequence(),new CharSequence2TokenSequence(),new TokenSequenceLowercase(),"+
"new TokenSequenceRemoveStopwords(),new TokenSequence2FeatureSequence(),new FeatureSequence2FeatureVector(),"+
"new Target2Label()";
static CommandOption.String pipeOption = new CommandOption.String
(InstanceList.class, "pipe", "Pipe constructor", true, defaultPipe,
"List of Java constructors for Pipe objects to be run in serial to process the pipe input, "+
"separated by semi-colons, with the 'new's omitted.", null);
static CommandOption.File pipeFileOption = new CommandOption.File
(InstanceList.class, "pipefile", "FILENAME", true, null,
"Same as --pipe, except get the pipe specification from the named file instead of from the command line. "+
"If both are set, the --pipe option takes precedence.", null);
static CommandOption.String outputFilenameOption = new CommandOption.String
(InstanceList.class, "output-file", "FILENAME", true, "instance-list.mallet",
"The filename in which to write the resulting instance list.", null);
// Some pre-packaged, typical configurations for pipe-input, pipe-input-iterator and pipe.
static CommandOption.SpacedStrings textFileClassificationOption = new CommandOption.SpacedStrings
(InstanceList.class, "pipe-text-file-directories", "DIR...", false, null,
"Use a standard text classification pipeline run on all the files in the following directories, "+
"one directory per class name.", null);
static final CommandOption.List commandOptions =
new CommandOption.List (
"Options for creating, manipulating, querying and saving instance lists",
new CommandOption[] {
pipeInputOption,
pipeInputIteratorOption,
pipeOption,
outputFilenameOption,
textFileClassificationOption,
prefixCodeOption,
});
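	// An illustrative invocation (the directory names below are hypothetical examples):
	//
	//   java cc.mallet.types.InstanceListTUI \
	//     --pipe-input data/class0 data/class1 \
	//     --output-file instance-list.mallet
	//
	// With the default --pipe-input-iterator, each --pipe-input argument is treated as a
	// starting directory of documents (typically one directory per class), and the default
	// --pipe turns each file into a labeled feature vector before the list is written
	// to --output-file.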
public static void main (String[] args) throws bsh.EvalError, java.io.IOException
{
// Process the command-line options
commandOptions.process (args);
BshInterpreter interpreter = new BshInterpreter(prefixCodeOption.value);
// Insert "new " before each constructor in the pipe specification
String pipeSpec = ((pipeOption.value == defaultPipe && pipeFileOption.value != null)
? IoUtils.contentsAsString (pipeFileOption.value)
: pipeOption.value);
//Pattern pat = Pattern.compile (",");
//Matcher mat = pat.matcher ("new SerialPipes(new Pipe[] { new "+pipeSpec+" })");
//String pipeWithNew = mat.replaceAll(", new ");
String pipeWithNew = "new SerialPipes(new Pipe[] { "+pipeSpec+" })";
// Construct the pipe
Pipe instancePipe = (Pipe) interpreter.eval (pipeWithNew);
//Pipe instancePipe = (Pipe) interpreter.eval (pipeOption.value);
//Pipe instancePipe = (Pipe) interpreter.eval ("new SerialPipes();");
InstanceList ilist = new InstanceList (instancePipe);
System.out.println ("Piping...");
//System.out.println ("pipeInput = "+pipeInputOption.value);
//System.out.println ("pipeInputIteator = "+pipeInput);
//System.out.println ("instancePipe = "+instancePipe);
// Run the pipe on the pipe input data
if (pipeInputIteratorOption.value != null) {
// Put the pipe-input in the bsh variable "pipeInput"
if (pipeInputOption.value.length > 1)
interpreter.set ("pipeInput", pipeInputOption.value);
else
interpreter.set ("pipeInput", pipeInputOption.value[0]);
Iterator<Instance> pii =
(Iterator<Instance>)interpreter.eval ("new "+pipeInputIteratorOption.value);
ilist.addThruPipe (pii);
} else {
Instance carrier;
if (pipeInputOption.value.length > 1)
carrier = instancePipe.instanceFrom(new Instance (pipeInputOption.value, null, null, null));
else
carrier = instancePipe.instanceFrom(new Instance (pipeInputOption.value[0], null, null, null));
if (carrier.getData() instanceof InstanceList)
ilist = (InstanceList) carrier.getData();
else
ilist.add (carrier);
}
// Save the instance list to disk
ObjectOutputStream oos = new ObjectOutputStream
(new FileOutputStream (outputFilenameOption.value));
oos.writeObject (ilist);
oos.close();
}
}
| 6,401 | 40.571429 | 111 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestToken.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types.tests;
import junit.framework.*;
import java.net.URI;
import java.net.URL;
import java.io.File;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.Token;
public class TestToken extends TestCase
{
public TestToken (String name) {
super (name);
}
public void testOne ()
{
Token t = new Token ("foo");
t.setProperty ("color", "red");
t.setProperty ("font", "TimesRoman");
t.setFeatureValue ("length", 3);
t.setFeatureValue ("containsVowel", 1);
t.setFeatureValue ("in /usr/dict/words", 0);
Alphabet dict = new Alphabet();
FeatureVector fv = t.toFeatureVector (dict, false);
assertTrue (fv.numLocations() == 2);
assertTrue (fv.value (dict.lookupIndex("length")) == 3);
}
public void testTwo ()
{
try {
URI uri = new URI ("file:/home/andrew/what-professors-do.html");
System.out.println ("Scheme = " + uri.getScheme());
File file = new File (uri);
System.out.println (file.getCanonicalPath());
file = new File ("what-professors-do.html");
System.out.println ("Name: " + file.getName());
System.out.println ("Parent: " + file.getParent());
System.out.println ("Path: " + file.getPath());
System.out.println ("Canonical: " + file.getCanonicalPath());
System.out.println ("Absolute: " + file.getAbsolutePath());
} catch (Exception e) {
e.printStackTrace();
}
}
public static Test suite ()
{
return new TestSuite (TestToken.class);
}
protected void setUp ()
{
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 2,128 | 23.755814 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestRankedFeatureVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types.tests;
import cc.mallet.types.*;
import junit.framework.*;
public class TestRankedFeatureVector extends TestCase
{
public TestRankedFeatureVector (String name) {
super (name);
}
public void testSetRankOrder ()
{
Alphabet v = new Alphabet ();
RankedFeatureVector rfv =
new RankedFeatureVector (v, new int[] {v.lookupIndex ("a"), v.lookupIndex ("b"), v.lookupIndex ("c"), v.lookupIndex ("d") },
new double[] {3.0, 1.0, 2.0, 6.0});
System.out.println ("vector size ="+rfv.numLocations());
for (int i = 0; i < rfv.numLocations(); i++)
System.out.println ("Rank="+i+" value="+rfv.getValueAtRank(i));
}
public static Test suite ()
{
return new TestSuite (TestRankedFeatureVector.class);
}
protected void setUp ()
{
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 1,401 | 25.961538 | 127 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestMatrix.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types.tests;
import cc.mallet.types.DenseVector;
import cc.mallet.types.SparseVector;
import junit.framework.*;
public class TestMatrix extends TestCase
{
public TestMatrix (String name) {
super (name);
}
public void testTimesEquals ()
{
double[] d1 = new double[] {1, 2, 3, 4, 5};
SparseVector m1 = new SparseVector (d1);
SparseVector m2 = new SparseVector (d1);
m2.timesEqualsSparse(m1);
m2.print();
}
public static Test suite ()
{
return new TestSuite (TestMatrix.class);
}
protected void setUp ()
{
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 1,170 | 21.960784 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestMatrixn.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types.tests;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import java.util.Arrays;
import java.io.IOException;
import cc.mallet.types.MatrixOps;
import cc.mallet.types.Matrixn;
/**
* Created: Aug 30, 2004
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestMatrixn.java,v 1.1 2007/10/22 21:37:55 mccallum Exp $
*/
public class TestMatrixn extends TestCase {
public TestMatrixn (String name)
{
super (name);
}
public void testIndexing1d ()
{
double m1[] = new double[]{1.0, 2.0, 3.0, 4.0};
int idx1[] = new int[1];
Matrixn a = new Matrixn (m1);
a.singleToIndices (3, idx1);
assertEquals (3, idx1[0]);
assertEquals (3, a.singleIndex (idx1));
}
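	// The 2d and 3d tests below rely on row-major (last-dimension-fastest) ordering:
	// for sizes {2,3}, single index 5 maps to indices (1,2) because 5 = 1*3 + 2,
	// and the value stored there is 2.0 * 5 = 10.0.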
public void testIndexing2d ()
{
int[] sizes = new int[]{2, 3};
double[] m1 = new double[6];
for (int i = 0; i < 6; i++) {
m1[i] = 2.0 * i;
}
Matrixn a = new Matrixn (sizes, m1);
int[] idx1 = new int[2];
a.singleToIndices (5, idx1);
System.out.println (idx1[0]+" , "+idx1[1]);
int[] trueIdx = new int[] {1, 2};
assertTrue (Arrays.equals (trueIdx, idx1));
assertEquals (5, a.singleIndex (idx1));
assertEquals (10.0, a.value (idx1), 1e-12);
}
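	// For sizes {2,3,4} the strides are {12,4,1}, so single index 21 corresponds to
	// indices (1,2,1) because 21 = 1*12 + 2*4 + 1*1, and the value there is 2.0 * 21 = 42.0.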
public void testIndexing3d ()
{
Matrixn a = make3dMatrix ();
int[] idx1 = new int[3];
a.singleToIndices (21, idx1);
int[] trueIdx = new int[]{1, 2, 1};
assertTrue (Arrays.equals (trueIdx, idx1));
assertEquals (21, a.singleIndex (idx1));
assertEquals (42.0, a.value (idx1), 1e-12);
}
private Matrixn make3dMatrix ()
{
int[] sizes = new int[]{2, 3, 4};
double[] m1 = new double[24];
for (int i = 0; i < 24; i++) {
m1[i] = 2.0 * i;
}
Matrixn a = new Matrixn (sizes, m1);
return a;
}
public void testMatrixnSerializable () throws IOException, ClassNotFoundException
{
Matrixn a = make3dMatrix ();
Matrixn b = (Matrixn) TestSerializable.cloneViaSerialization (a);
assertEquals (a.singleSize(), b.singleSize());
for (int i = 0; i < a.singleSize (); i++) {
int[] idxa = new int [a.getNumDimensions ()];
int[] idxb = new int [a.getNumDimensions ()];
a.singleToIndices (i, idxa);
b.singleToIndices (i, idxb);
assertTrue (Arrays.equals (idxa, idxb));
assertEquals (a.value (idxa), b.value (idxb), 1e-12);
}
}
public static Test suite ()
{
return new TestSuite (TestMatrixn.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestMatrixn (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 3,318 | 26.204918 | 83 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestHashedSparseVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types.tests;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import java.io.File;
import java.io.IOException;
import cc.mallet.types.DenseVector;
import cc.mallet.types.HashedSparseVector;
import cc.mallet.types.SparseVector;
import cc.mallet.util.FileUtils;
public class TestHashedSparseVector extends TestCase
{
public TestHashedSparseVector (String name) {
super (name);
}
double[] dbl1 = new double[] {1, 2, 3, 4, 5};
double[] dbl2 = new double[] {1, 1.5, 2, 1, 1};
double[] dbl3 = new double[] { 2.0, 2.5, 3.0, 4.7, 3.5,
3.6, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, };
int[] idxs = new int[] {3, 5, 7, 13, 15};
HashedSparseVector s1 = new HashedSparseVector (idxs, dbl1, dbl1.length, dbl1.length,
true, true, true);
HashedSparseVector s2 = new HashedSparseVector (idxs, dbl2, dbl2.length, dbl2.length,
true, true, true);
SparseVector d1 = new SparseVector (dbl3, true);
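	// Fixture notes: s1 holds {1,2,3,4,5} and s2 holds {1,1.5,2,1,1} at indices {3,5,7,13,15};
	// d1 is effectively dense (nonzero only at indices 0-5). Hence
	//   s1 . s2 = 1*1 + 2*1.5 + 3*2 + 4*1 + 5*1 = 19.0
	//   s1 . d1 = 1*4.7 + 2*3.6 = 11.9 and s2 . d1 = 1*4.7 + 1.5*3.6 = 10.1,
	// which are the values asserted in testDotProduct below.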
private void checkAnswer (HashedSparseVector actual, double[] ans)
{
assertEquals ("Wrong number of locations:",
ans.length, actual.numLocations());
for (int i = 0; i < actual.numLocations(); i++) {
assertEquals ("Value incorrect at location "+i+": ",
ans[i], actual.valueAtLocation (i) , 0.0);
}
}
public void testPlusEquals ()
{
HashedSparseVector s = (HashedSparseVector) s1.cloneMatrix ();
s.plusEqualsSparse (s2, 2.0);
checkAnswer (s, new double[] { 3, 5, 7, 6, 7 });
HashedSparseVector s2p = new HashedSparseVector
(new int[] { 13 },
new double[] { 0.8 });
s.plusEqualsSparse (s2p, 1.0);
checkAnswer (s, new double[] { 3, 5, 7, 6.8, 7 });
HashedSparseVector s3p = new HashedSparseVector
(new int[] { 14 },
new double[] { 0.8 });
s.plusEqualsSparse (s3p, 1.0);
checkAnswer (s, new double[] { 3, 5, 7, 6.8, 7 }); // verify s unchanged
HashedSparseVector s4 = new HashedSparseVector
(new int[] { 7, 14, 15 },
new double[] { 0.2, 0.8, 1.2 });
s.plusEqualsSparse (s4, 1.0);
checkAnswer (s, new double[] { 3, 5, 7.2, 6.8, 8.2 });
HashedSparseVector s5 = new HashedSparseVector (new int[] { 7 }, new double[] { 0.2 });
s5.plusEqualsSparse (s1);
for (int i = 0; i < s5.numLocations(); i++) {
assertEquals (7, s5.indexAtLocation (i));
assertEquals (3.2, s5.valueAtLocation (i), 0.0);
}
HashedSparseVector s6 = new HashedSparseVector (new int[] { 7 }, new double[] { 0.2 });
s6.plusEqualsSparse (s1, 3.5);
for (int i = 0; i < s6.numLocations(); i++) {
assertEquals (7, s6.indexAtLocation (i));
assertEquals (10.7, s6.valueAtLocation (i), 0.0);
}
}
public void testPlusEqualsAfterClone ()
{
s1.indexVector ();
HashedSparseVector s = (HashedSparseVector) s1.cloneMatrixZeroed ();
s.plusEqualsSparse (s1);
s.plusEqualsSparse (s2, 2.0);
checkAnswer (s, new double[] { 3, 5, 7, 6, 7 });
}
public void testDotProduct () {
HashedSparseVector t1 = new HashedSparseVector (new int[] { 7 }, new double[] { 0.2 });
assertEquals (0.6, t1.dotProduct (s1), 0.00001);
assertEquals (0.6, s1.dotProduct (t1), 0.00001);
assertEquals (19.0, s1.dotProduct (s2), 0.00001);
assertEquals (19.0, s2.dotProduct (s1), 0.00001);
assertEquals (11.9, s1.dotProduct (d1), 0.00001);
assertEquals (10.1, s2.dotProduct (d1), 0.00001);
}
public void testIncrementValue ()
{
HashedSparseVector s = (HashedSparseVector) s1.cloneMatrix ();
s.incrementValue (5, 0.75);
double[] ans = new double[] {1, 2.75, 3, 4, 5};
for (int i = 0; i < s.numLocations(); i++) {
assertTrue (s.valueAtLocation (i) == ans[i]);
}
}
public void testSetValue ()
{
HashedSparseVector s = (HashedSparseVector) s1.cloneMatrix ();
s.setValue (5, 0.3);
double[] ans = new double[] {1, 0.3, 3, 4, 5};
for (int i = 0; i < s.numLocations(); i++) {
assertTrue (s.valueAtLocation (i) == ans[i]);
}
}
private static int[] idx2 = { 3, 7, 12, 15, 18 };
public void testBinaryVector ()
{
HashedSparseVector binary1 = new HashedSparseVector (idxs, null, idxs.length, idxs.length,
false, false, false);
HashedSparseVector binary2 = new HashedSparseVector (idx2, null, idx2.length, idx2.length,
false, false, false);
assertEquals (3, binary1.dotProduct (binary2), 0.0001);
assertEquals (3, binary2.dotProduct (binary1), 0.0001);
assertEquals (15.0, binary1.dotProduct (s1), 0.0001);
assertEquals (15.0, s1.dotProduct (binary1), 0.0001);
assertEquals (9.0, binary2.dotProduct (s1), 0.0001);
assertEquals (9.0, s1.dotProduct (binary2), 0.0001);
HashedSparseVector dblVec = (HashedSparseVector) s1.cloneMatrix ();
dblVec.plusEqualsSparse (binary1);
checkAnswer (dblVec, new double[] { 2, 3, 4, 5, 6 });
HashedSparseVector dblVec2 = (HashedSparseVector) s1.cloneMatrix ();
dblVec2.plusEqualsSparse (binary2);
checkAnswer (dblVec2, new double[] { 2, 2, 4, 4, 6 });
}
public void testCloneMatrixZeroed ()
{
HashedSparseVector s = (HashedSparseVector) s1.cloneMatrixZeroed ();
for (int i = 0; i < s.numLocations(); i++) {
assertTrue (s.valueAtLocation (i) == 0.0);
assertTrue (s.indexAtLocation (i) == idxs [i]);
}
}
public void testSerializable () throws Exception
{
// Write out the sparse vector s1
HashedSparseVector s2 = (HashedSparseVector) TestSerializable.cloneViaSerialization (s1);
assertEquals (s1.numLocations (), s2.numLocations ());
for (int loc = 0; loc < s1.numLocations (); loc++) {
assertEquals (s1.valueAtLocation (loc), s2.valueAtLocation (loc), 0.001);
}
}
// tests index2location getting screwed up when old (v 1.3) instances are de-serialized
public void testPlusEqualsFromSaved () throws IOException, ClassNotFoundException
{
HashedSparseVector s1 = (HashedSparseVector) FileUtils.readObject (oldSv);
HashedSparseVector s2 = new HashedSparseVector (new int[] { 1 }, new double[] { 1.0 });
s1.plusEqualsSparse (s2, 1.0);
assertEquals (1.0, s1.value (0), 1e-5);
assertEquals (0.0, s1.value (1), 1e-5);
}
// This is a hashedSparseVector from cvs version 1.3. It was saved by saveOldSv(), below.
private static File oldSv = new File ("test/resources/edu/umass/cs/mallet/base/types/hashed.sv.old.ser");
public static void saveOldSv ()
{
HashedSparseVector sv = new HashedSparseVector (new int[] { 0, 2 }, new double[] { 1.0, 2.0 });
sv.indexVector ();
FileUtils.writeObject (oldSv, sv);
}
public static Test suite ()
{
return new TestSuite (TestHashedSparseVector.class);
}
protected void setUp ()
{
}
public static void main (String[] args)
{
// saveOldSv ();
junit.textui.TestRunner.run (suite());
}
}
| 7,392 | 31.857778 | 107 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestSparseMatrixn.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types.tests;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import java.util.Arrays;
import java.io.IOException;
import cc.mallet.types.MatrixOps;
import cc.mallet.types.Matrixn;
import cc.mallet.types.SparseMatrixn;
import gnu.trove.TIntArrayList;
import gnu.trove.TDoubleArrayList;
/**
* Created: Aug 30, 2004
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestSparseMatrixn.java,v 1.1 2007/10/22 21:37:55 mccallum Exp $
*/
public class TestSparseMatrixn extends TestCase {
public TestSparseMatrixn (String name)
{
super (name);
}
public void testIndexing1d ()
{
double m1[] = new double[]{1.0, 2.0, 3.0, 4.0};
int idx1[] = new int[1];
SparseMatrixn a = new SparseMatrixn (m1);
a.singleToIndices (3, idx1);
assertEquals (3, idx1[0]);
assertEquals (3, a.singleIndex (idx1));
}
public void testIndexing2d ()
{
int[] sizes = new int[]{2, 3};
double[] m1 = new double[6];
for (int i = 0; i < 6; i++) {
m1[i] = 2.0 * i;
}
SparseMatrixn a = new SparseMatrixn (sizes, m1);
int[] idx1 = new int[2];
a.singleToIndices (5, idx1);
System.out.println (idx1[0]+" , "+idx1[1]);
int[] trueIdx = new int[] {1, 2};
assertTrue (Arrays.equals (trueIdx, idx1));
assertEquals (5, a.singleIndex (idx1));
assertEquals (10.0, a.value (idx1), 1e-12);
}
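	// make3dMatrix() below stores the value 2*i at every single index i that is not a
	// multiple of 3, so single index 21 (= indices (1,2,1)) is absent and reads back as 0,
	// while index 22 (= indices (1,2,2)) holds 2.0 * 22 = 44.0.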
public void testIndexing3d ()
{
SparseMatrixn a = make3dMatrix ();
int[] idx1 = new int[3];
a.singleToIndices (21, idx1);
int[] trueIdx = new int[]{1, 2, 1};
assertTrue (Arrays.equals (trueIdx, idx1));
assertEquals (21, a.singleIndex (idx1));
assertEquals (0, a.value (idx1), 1e-12);
int[] idx2 = new int[]{1, 2, 2};
assertEquals (22, a.singleIndex (idx2));
assertEquals (44.0, a.value (idx2), 1e-12);
}
private SparseMatrixn make3dMatrix ()
{
int[] sizes = new int[]{2, 3, 4};
TIntArrayList idxs = new TIntArrayList ();
TDoubleArrayList vals = new TDoubleArrayList ();
for (int i = 0; i < 24; i++) {
if (i % 3 != 0) {
idxs.add (i);
vals.add (2.0 * i);
}
}
SparseMatrixn a = new SparseMatrixn (sizes, idxs.toNativeArray (), vals.toNativeArray ());
return a;
}
public void testSparseMatrixnSerializable () throws IOException, ClassNotFoundException
{
SparseMatrixn a = make3dMatrix ();
SparseMatrixn b = (SparseMatrixn) TestSerializable.cloneViaSerialization (a);
assertEquals (a.singleSize(), b.singleSize());
for (int i = 0; i < a.singleSize (); i++) {
int[] idxa = new int [a.getNumDimensions ()];
int[] idxb = new int [a.getNumDimensions ()];
a.singleToIndices (i, idxa);
b.singleToIndices (i, idxb);
assertTrue (Arrays.equals (idxa, idxb));
assertEquals (a.value (idxa), b.value (idxb), 1e-12);
}
}
public static Test suite ()
{
return new TestSuite (TestSparseMatrixn.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestSparseMatrixn (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 3,823 | 27.117647 | 94 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestLabelVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types.tests;
import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.LabelVector;
import junit.framework.*;
public class TestLabelVector extends TestCase
{
public TestLabelVector (String name)
{
super (name);
}
private LabelAlphabet ld;
private LabelVector lv;
protected void setUp ()
{
ld = new LabelAlphabet ();
lv = new LabelVector (ld,
new int[] {
ld.lookupIndex ("a"),
ld.lookupIndex ("b"),
ld.lookupIndex ("c"),
ld.lookupIndex ("d")},
new double[] {3, 4, 2, 1});
}
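	// With values a=3, b=4, c=2, d=1 the ranking is b > a > c > d, so the best label
	// is "b" and the label at rank 1 is "a", as checked below.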
public void testGetBestLabel ()
{
assertTrue (lv.getBestLabel() == ld.lookupLabel ("b"));
}
public void testGetLabelAtRank ()
{
assertTrue (lv.getLabelAtRank(1) == ld.lookupLabel ("a"));
}
public void testValue ()
{
assertEquals (4.0, lv.value (ld.lookupLabel ("b")), 1e-5);
}
public static Test suite ()
{
return new TestSuite (TestLabelVector.class);
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 1,625 | 22.228571 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestAugmentableFeatureVector.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types.tests;
import cc.mallet.types.Alphabet;
import cc.mallet.types.AugmentableFeatureVector;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.SparseVector;
import junit.framework.*;
/**
* Created: Dec 30, 2004
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestAugmentableFeatureVector.java,v 1.1 2007/10/22 21:37:55 mccallum Exp $
*/
public class TestAugmentableFeatureVector extends TestCase {
public TestAugmentableFeatureVector (String name)
{
super (name);
}
public static Test suite ()
{
return new TestSuite (TestAugmentableFeatureVector.class);
}
public void testDotProductBinaryToSV ()
{
SparseVector v = makeSparseVectorToN (5);
AugmentableFeatureVector afv = makeAfv (new int[] { 1, 3 }, true);
double dp = afv.dotProduct (v);
assertEquals (4.0, dp, 1e-5);
new AugmentableFeatureVector (new Alphabet(), true);
}
public void testDotProductSparseASVToSV ()
{
SparseVector v = makeSparseVectorToN (7);
AugmentableFeatureVector afv = makeAfv (new int[] { 1, 3 }, false);
double dp = afv.dotProduct (v);
assertEquals (4.0, dp, 1e-5);
afv = makeAfv (new int[] { 2, 5 }, false);
dp = afv.dotProduct (v);
assertEquals (7.0, dp, 1e-5);
}
private AugmentableFeatureVector makeAfv (int[] ints, boolean binary)
{
AugmentableFeatureVector afv = new AugmentableFeatureVector (new Alphabet(), binary);
for (int i = 0; i < ints.length; i++) {
int idx = ints[i];
afv.add (idx, 1.0);
}
return afv;
}
private SparseVector makeSparseVectorToN (int N)
{
double[] vals = new double [N];
for (int i = 0; i < N; i++) {
vals [i] = i;
}
return new SparseVector (vals);
}
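	// testAddWithPrefix: fv is a binary vector over dict with entries at indices 1 and 3
	// ("ONE" and "THREE"). add(fv, "O:") re-enters those features into afv's own alphabet
	// under the prefixed names, so afv's alphabet ends up with exactly 2 entries and prints
	// "O:ONE" and "O:THREE", while the original 4-entry dict is left unchanged.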
public void testAddWithPrefix ()
{
Alphabet dict = new Alphabet ();
dict.lookupIndex ("ZERO");
dict.lookupIndex ("ONE");
dict.lookupIndex ("TWO");
dict.lookupIndex ("THREE");
FeatureVector fv = new FeatureVector (dict, new int[] { 1,3 });
AugmentableFeatureVector afv = new AugmentableFeatureVector (new Alphabet (), true);
afv.add (fv, "O:");
assertEquals (4, dict.size());
assertEquals (2, afv.getAlphabet ().size());
assertEquals ("O:ONE\nO:THREE\n", afv.toString ());
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestAugmentableFeatureVector (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 3,119 | 28.158879 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestIndexedSparseVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types.tests;
import junit.framework.*;
import java.io.IOException;
import java.util.Arrays;
import cc.mallet.types.DenseVector;
import cc.mallet.types.IndexedSparseVector;
import cc.mallet.types.SparseVector;
public class TestIndexedSparseVector extends TestCase
{
public TestIndexedSparseVector (String name) {
super (name);
}
double[] dbl1 = new double[] {1, 2, 3, 4, 5};
double[] dbl2 = new double[] {1, 1.5, 2, 1, 1};
double[] dbl3 = new double[] { 2.0, 2.5, 3.0, 4.7, 3.5,
3.6, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, };
int[] idxs = new int[] {3, 5, 7, 13, 15};
IndexedSparseVector s1 = new IndexedSparseVector (idxs, dbl1, dbl1.length, dbl1.length,
true, true, true);
IndexedSparseVector s2 = new IndexedSparseVector (idxs, dbl2, dbl2.length, dbl2.length,
true, true, true);
SparseVector d1 = new SparseVector (dbl3, true);
public void testLocation ()
{
int curidx = 0;
int max = idxs[idxs.length - 1];
for (int idx = 0; idx < max; idx++) {
if (idx == idxs[curidx]) {
assertEquals (dbl1[curidx], s1.value (idx), 1e-10);
curidx++;
} else {
assertEquals (0, s1.value (idx), 1e-10);
}
}
assertEquals (0, s1.value (max+1), 1e-10);
}
private void checkAnswer (IndexedSparseVector actual, double[] ans)
{
assertEquals ("Wrong number of locations:",
ans.length, actual.numLocations());
for (int i = 0; i < actual.numLocations(); i++) {
assertEquals ("Value incorrect at location "+i+": ",
ans[i], actual.valueAtLocation (i) , 0.0);
}
}
public void testPlusEquals ()
{
IndexedSparseVector s = (IndexedSparseVector) s1.cloneMatrix ();
s.plusEqualsSparse (s2, 2.0);
checkAnswer (s, new double[] { 3, 5, 7, 6, 7 });
IndexedSparseVector s2p = new IndexedSparseVector
(new int[] { 13 },
new double[] { 0.8 });
s.plusEqualsSparse (s2p, 1.0);
checkAnswer (s, new double[] { 3, 5, 7, 6.8, 7 });
IndexedSparseVector s3p = new IndexedSparseVector
(new int[] { 14 },
new double[] { 0.8 });
s.plusEqualsSparse (s3p, 1.0);
checkAnswer (s, new double[] { 3, 5, 7, 6.8, 7 }); // verify s unchanged
IndexedSparseVector s4 = new IndexedSparseVector
(new int[] { 7, 14, 15 },
new double[] { 0.2, 0.8, 1.2 });
s.plusEqualsSparse (s4, 1.0);
checkAnswer (s, new double[] { 3, 5, 7.2, 6.8, 8.2 });
IndexedSparseVector s5 = new IndexedSparseVector (new int[] { 7 }, new double[] { 0.2 });
s5.plusEqualsSparse (s1);
for (int i = 0; i < s5.numLocations(); i++) {
assertEquals (7, s5.indexAtLocation (i));
assertEquals (3.2, s5.valueAtLocation (i), 0.0);
}
IndexedSparseVector s6 = new IndexedSparseVector (new int[] { 7 }, new double[] { 0.2 });
s6.plusEqualsSparse (s1, 3.5);
for (int i = 0; i < s6.numLocations(); i++) {
assertEquals (7, s6.indexAtLocation (i));
assertEquals (10.7, s6.valueAtLocation (i), 0.0);
}
}
public void testDotProduct () {
IndexedSparseVector t1 = new IndexedSparseVector (new int[] { 7 }, new double[] { 0.2 });
assertEquals (0.6, t1.dotProduct (s1), 0.00001);
assertEquals (0.6, s1.dotProduct (t1), 0.00001);
assertEquals (19.0, s1.dotProduct (s2), 0.00001);
assertEquals (19.0, s2.dotProduct (s1), 0.00001);
assertEquals (11.9, s1.dotProduct (d1), 0.00001);
assertEquals (10.1, s2.dotProduct (d1), 0.00001);
}
public void testIncrementValue ()
{
IndexedSparseVector s = (IndexedSparseVector) s1.cloneMatrix ();
s.incrementValue (5, 0.75);
double[] ans = new double[] {1, 2.75, 3, 4, 5};
for (int i = 0; i < s.numLocations(); i++) {
assertTrue (s.valueAtLocation (i) == ans[i]);
}
}
public void testSetValue ()
{
IndexedSparseVector s = (IndexedSparseVector) s1.cloneMatrix ();
s.setValue (5, 0.3);
double[] ans = new double[] {1, 0.3, 3, 4, 5};
for (int i = 0; i < s.numLocations(); i++) {
assertTrue (s.valueAtLocation (i) == ans[i]);
}
}
private static int[] idx2 = { 3, 7, 12, 15, 18 };
public void testBinaryVector ()
{
IndexedSparseVector binary1 = new IndexedSparseVector (idxs, null, idxs.length, idxs.length,
false, false, false);
IndexedSparseVector binary2 = new IndexedSparseVector (idx2, null, idx2.length, idx2.length,
false, false, false);
assertEquals (3, binary1.dotProduct (binary2), 0.0001);
assertEquals (3, binary2.dotProduct (binary1), 0.0001);
assertEquals (15.0, binary1.dotProduct (s1), 0.0001);
assertEquals (15.0, s1.dotProduct (binary1), 0.0001);
assertEquals (9.0, binary2.dotProduct (s1), 0.0001);
assertEquals (9.0, s1.dotProduct (binary2), 0.0001);
IndexedSparseVector dblVec = (IndexedSparseVector) s1.cloneMatrix ();
dblVec.plusEqualsSparse (binary1);
checkAnswer (dblVec, new double[] { 2, 3, 4, 5, 6 });
IndexedSparseVector dblVec2 = (IndexedSparseVector) s1.cloneMatrix ();
dblVec2.plusEqualsSparse (binary2);
checkAnswer (dblVec2, new double[] { 2, 2, 4, 4, 6 });
}
public void testCloneMatrixZeroed ()
{
IndexedSparseVector s = (IndexedSparseVector) s1.cloneMatrixZeroed ();
for (int i = 0; i < s.numLocations(); i++) {
assertTrue (s.valueAtLocation (i) == 0.0);
assertTrue (s.indexAtLocation (i) == idxs [i]);
}
}
public void testEmptyLocations ()
{
IndexedSparseVector s = new IndexedSparseVector (new int[0], new double [0]);
assertEquals (0.0, s.value (38), 1e-10);
assertEquals (0.0, s.dotProduct (s1), 1e-10);
}
public void testSerializable () throws IOException, ClassNotFoundException
{
IndexedSparseVector s = (IndexedSparseVector) s1.cloneMatrix ();
IndexedSparseVector sPrime = (IndexedSparseVector) TestSerializable.cloneViaSerialization (s);
assertEquals (s.numLocations (), sPrime.numLocations ());
assertTrue (Arrays.equals (s.getIndices (), sPrime.getIndices ()));
assertTrue (Arrays.equals (s.getValues (), sPrime.getValues ()));
}
public void testSerializable2 () throws IOException, ClassNotFoundException
{
SparseVector[][] vecs = new SparseVector[2][];
vecs[0] = new SparseVector[] {
(SparseVector) s1.cloneMatrix (),
(SparseVector) s1.cloneMatrix (),
};
vecs[1] = new SparseVector[] {
(SparseVector) s1.cloneMatrix (),
};
SparseVector[][] vecsPrime = (SparseVector[][]) TestSerializable.cloneViaSerialization (vecs);
assertEquals (vecs.length, vecsPrime.length);
}
public static Test suite ()
{
return new TestSuite (TestIndexedSparseVector.class);
}
protected void setUp ()
{
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 7,699 | 32.920705 | 98 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestLabelAlphabet.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://mallet.cs.umass.edu/
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types.tests;
import junit.framework.TestCase;
import junit.framework.Test;
import junit.framework.TestSuite;
import java.io.IOException;
import java.io.Serializable;
import cc.mallet.types.Alphabet;
import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
/**
* Created: Nov 24, 2004
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestLabelAlphabet.java,v 1.1 2007/10/22 21:37:55 mccallum Exp $
*/
public class TestLabelAlphabet extends TestCase {
public TestLabelAlphabet (String name)
{
super (name);
}
private static class Labelee implements Serializable {
LabelAlphabet dict;
Label theLabel;
public Labelee (LabelAlphabet dict, Label theLabel)
{
this.dict = dict;
this.theLabel = theLabel;
}
}
/** Tests how serializing labels separately can lead to big losses.
* This currently fails. I'm not sure what to do about this. -cas
*/
public void testReadResolve () throws IOException, ClassNotFoundException
{
LabelAlphabet dict = new LabelAlphabet ();
dict.lookupIndex ("TEST1");
dict.lookupIndex ("TEST2");
dict.lookupIndex ("TEST3");
Label t1 = dict.lookupLabel ("TEST1");
Labelee l = new Labelee (dict, t1);
Labelee l2 = (Labelee) TestSerializable.cloneViaSerialization (l);
assertTrue (l.dict == l2.dict);
assertTrue (dict.lookupLabel("TEST1") == l.theLabel);
assertTrue (dict.lookupLabel("TEST1") == l2.theLabel);
assertTrue (l.theLabel == l2.theLabel);
}
public static Test suite ()
{
return new TestSuite (TestLabelAlphabet.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestLabelAlphabet (args[i]));
}
} else {
theSuite = (TestSuite) TestLabelAlphabet.suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,448 | 26.829545 | 80 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestFeatureVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types.tests;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.FeatureVector;
import junit.framework.*;
public class TestFeatureVector extends TestCase
{
public TestFeatureVector (String name)
{
super (name);
}
Alphabet dict;
FeatureSequence fs;
FeatureVector fv;
protected void setUp ()
{
dict = new Alphabet ();
fs = new FeatureSequence (dict, 2);
fs.add (dict.lookupIndex ("a"));
fs.add (dict.lookupIndex ("n"));
fs.add (dict.lookupIndex ("d"));
fs.add (dict.lookupIndex ("r"));
fs.add (dict.lookupIndex ("e"));
fs.add (dict.lookupIndex ("w"));
fs.add (dict.lookupIndex ("m"));
fs.add (dict.lookupIndex ("c"));
fs.add (dict.lookupIndex ("c"));
fs.add (dict.lookupIndex ("a"));
fs.add (dict.lookupIndex ("l"));
fs.add (dict.lookupIndex ("l"));
fs.add (dict.lookupIndex ("u"));
fs.add (dict.lookupIndex ("m"));
//System.out.println (fs.toString());
fv = new FeatureVector (fs);
//System.out.println (fs.toString());
//System.out.println (fv.toString());
}
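	// The 14-token sequence spells "andrewmccallum": 'a', 'c', 'l' and 'm' each occur twice
	// and the remaining six letters once, giving 10 distinct features. That accounts for the
	// counts of 2.0 and 1.0 and for numLocations() == 10 in the tests below.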
public void testDuplicateValueFromFeatureSequence ()
{
assertTrue (fv.value (dict.lookupIndex ("a")) == 2.0);
}
public void testSingleValueFromFeatureSequence ()
{
assertTrue (fv.value (dict.lookupIndex ("n")) == 1.0);
}
public void testSizeFromFeatureSequence ()
{
assertTrue (fv.numLocations() == 10);
}
public static Test suite ()
{
return new TestSuite (TestFeatureVector.class);
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 2,104 | 24.361446 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestMultinomial.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types.tests;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.Multinomial;
import junit.framework.*;
public class TestMultinomial extends TestCase
{
public TestMultinomial (String name)
{
super (name);
}
public void testMultinomial ()
{
double[] c = new double[] {.2, .3, .1, .4};
Multinomial m = new Multinomial (c);
assertTrue (m.probability (0) == .2);
}
public void testEstimating ()
{
Alphabet dict = new Alphabet ();
Multinomial.Estimator e = new Multinomial.LaplaceEstimator (dict);
FeatureSequence fs = new FeatureSequence (dict);
fs.add (dict.lookupIndex ("a"));
fs.add (dict.lookupIndex ("n"));
fs.add (dict.lookupIndex ("d"));
fs.add (dict.lookupIndex ("r"));
fs.add (dict.lookupIndex ("e"));
fs.add (dict.lookupIndex ("w"));
fs.add (dict.lookupIndex ("m"));
fs.add (dict.lookupIndex ("c"));
fs.add (dict.lookupIndex ("c"));
fs.add (dict.lookupIndex ("a"));
fs.add (dict.lookupIndex ("l"));
fs.add (dict.lookupIndex ("l"));
fs.add (dict.lookupIndex ("u"));
fs.add (dict.lookupIndex ("m"));
//System.out.println (fs.toString());
e.increment (fs);
assertTrue (e.size() == 10);
Multinomial m = e.estimate ();
assertTrue (m.size() == 10);
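		// Laplace (add-one) smoothing: p(feature) = (count + 1) / (numTokens + numFeatures),
		// e.g. p("a") = (2+1)/(14+10) since "a" occurs twice among 14 tokens and 10 features.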
assertTrue (m.probability (dict.lookupIndex ("a")) == (2.0+1)/(14.0+10));
assertTrue (m.probability ("w") == (1.0+1)/(14.0+10));
Multinomial.Logged ml = new Multinomial.Logged (m);
assertTrue (m.logProbability ("w") == ml.logProbability ("w"));
}
public static Test suite ()
{
return new TestSuite (TestMultinomial.class);
}
protected void setUp ()
{
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 2,267 | 26.325301 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestPagedInstanceList.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types.tests;
import junit.framework.*;
import java.io.File;
import java.util.Iterator;
import cc.mallet.classify.*;
import cc.mallet.pipe.*;
import cc.mallet.pipe.iterator.PipeInputIterator;
import cc.mallet.pipe.iterator.RandomTokenSequenceIterator;
import cc.mallet.types.Alphabet;
import cc.mallet.types.Dirichlet;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Instance;
import cc.mallet.types.PagedInstanceList;
import cc.mallet.util.Randoms;
/**
* Created: Apr 19, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestPagedInstanceList.java,v 1.1 2007/10/22 21:37:55 mccallum Exp $
*/
public class TestPagedInstanceList extends TestCase {
public TestPagedInstanceList (String name)
{
super (name);
}
public static Test suite ()
{
return new TestSuite (TestPagedInstanceList.class);
}
private static Alphabet dictOfSize (int size)
{
Alphabet ret = new Alphabet ();
for (int i = 0; i < size; i++)
ret.lookupIndex ("feature"+i);
return ret;
}
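	// Trains the same classifier on an ordinary in-memory InstanceList and on a
	// PagedInstanceList (which keeps only some instances in memory, paging the rest to
	// disk in the given directory); since paging should only change where instances are
	// stored, the two test-set accuracies are expected to agree.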
public void testRandomTrained ()
{
Pipe p = new SerialPipes (new Pipe[] {
new TokenSequence2FeatureSequence (),
new FeatureSequence2FeatureVector (),
new Target2Label()});
double testAcc1 = testRandomTrainedOn (new InstanceList (p));
double testAcc2 = testRandomTrainedOn (new PagedInstanceList (p, 700, 200, new File(".")));
assertEquals (testAcc1, testAcc2, 0.01);
}
private double testRandomTrainedOn (InstanceList training)
{
ClassifierTrainer trainer = new MaxEntTrainer ();
Alphabet fd = dictOfSize (3);
String[] classNames = new String[] {"class0", "class1", "class2"};
Randoms r = new Randoms (1);
Iterator<Instance> iter = new RandomTokenSequenceIterator (r, new Dirichlet(fd, 2.0),
30, 0, 10, 200, classNames);
training.addThruPipe (iter);
InstanceList testing = new InstanceList (training.getPipe ());
testing.addThruPipe (new RandomTokenSequenceIterator (r, new Dirichlet(fd, 2.0),
30, 0, 10, 200, classNames));
System.out.println ("Training set size = "+training.size());
System.out.println ("Testing set size = "+testing.size());
Classifier classifier = trainer.train (training);
System.out.println ("Accuracy on training set:");
System.out.println (classifier.getClass().getName()
+ ": " + new Trial (classifier, training).getAccuracy());
System.out.println ("Accuracy on testing set:");
double testAcc = new Trial (classifier, testing).getAccuracy();
System.out.println (classifier.getClass().getName()
+ ": " + testAcc);
return testAcc;
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestPagedInstanceList (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 3,511 | 30.079646 | 95 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestAlphabet.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types.tests;
import junit.framework.*;
import java.io.IOException;
import cc.mallet.types.Alphabet;
/**
* Created: Nov 24, 2004
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestAlphabet.java,v 1.1 2007/10/22 21:37:55 mccallum Exp $
*/
public class TestAlphabet extends TestCase {
public TestAlphabet (String name)
{
super (name);
}
public void testNotFound ()
{
Alphabet dict = new Alphabet ();
dict.lookupIndex ("TEST1");
dict.lookupIndex ("TEST2");
dict.lookupIndex ("TEST3");
assertEquals (-1, dict.lookupIndex ("TEST4", false));
assertEquals (3, dict.size());
assertEquals (3, dict.lookupIndex ("TEST4", true));
}
	// tests a bug where a deserialized Alphabet did not resolve (via readResolve) back to the original instance
public void testReadResolve () throws IOException, ClassNotFoundException
{
Alphabet dict = new Alphabet ();
dict.lookupIndex ("TEST1");
dict.lookupIndex ("TEST2");
dict.lookupIndex ("TEST3");
Alphabet dict2 = (Alphabet) TestSerializable.cloneViaSerialization (dict);
assertTrue (dict == dict2);
}
public static Test suite ()
{
return new TestSuite (TestAlphabet.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestAlphabet (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 1,963 | 26.277778 | 78 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestSparseVector.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types.tests;
import java.io.*;
import cc.mallet.types.DenseVector;
import cc.mallet.types.SparseVector;
import junit.framework.*;
public class TestSparseVector extends TestCase
{
public TestSparseVector (String name) {
super (name);
}
double[] dbl1 = new double[] {1, 2, 3, 4, 5};
double[] dbl2 = new double[] {1, 1.5, 2, 1, 1};
double[] dbl3 = new double[] { 2.0, 2.5, 3.0, 4.7, 3.5,
3.6, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, };
double[] dbl4 = new double[] {1,2,3,4,Double.NEGATIVE_INFINITY};
int[] idxs = new int[] {3, 5, 7, 13, 15};
SparseVector s1 = new SparseVector (idxs, dbl1, dbl1.length, dbl1.length,
true, true, true);
SparseVector s2 = new SparseVector (idxs, dbl2, dbl2.length, dbl2.length,
true, true, true);
DenseVector d1 = new DenseVector (dbl3, true);
private void checkAnswer (SparseVector actual, double[] ans)
{
assertEquals ("Wrong number of locations:",
ans.length, actual.numLocations());
for (int i = 0; i < actual.numLocations(); i++) {
assertEquals ("Value incorrect at location "+i+": ",
ans[i], actual.valueAtLocation (i) , 0.0);
}
}
public void testPlusEquals ()
{
SparseVector s = (SparseVector) s1.cloneMatrix ();
s.plusEqualsSparse (s2, 2.0);
checkAnswer (s, new double[] { 3, 5, 7, 6, 7 });
SparseVector s2p = new SparseVector
(new int[] { 13 },
new double[] { 0.8 });
s.plusEqualsSparse (s2p, 1.0);
checkAnswer (s, new double[] { 3, 5, 7, 6.8, 7 });
SparseVector s3p = new SparseVector
(new int[] { 14 },
new double[] { 0.8 });
s.plusEqualsSparse (s3p, 1.0);
checkAnswer (s, new double[] { 3, 5, 7, 6.8, 7 }); // verify s unchanged
SparseVector s4 = new SparseVector
(new int[] { 7, 14, 15 },
new double[] { 0.2, 0.8, 1.2 });
s.plusEqualsSparse (s4, 1.0);
checkAnswer (s, new double[] { 3, 5, 7.2, 6.8, 8.2 });
SparseVector s5 = new SparseVector (new int[] { 7 }, new double[] { 0.2 });
s5.plusEqualsSparse (s1);
for (int i = 0; i < s5.numLocations(); i++) {
assertEquals (7, s5.indexAtLocation (i));
assertEquals (3.2, s5.valueAtLocation (i), 0.0);
}
SparseVector s6 = new SparseVector (new int[] { 7 }, new double[] { 0.2 });
s6.plusEqualsSparse (s1, 3.5);
for (int i = 0; i < s6.numLocations(); i++) {
assertEquals (7, s6.indexAtLocation (i));
assertEquals (10.7, s6.valueAtLocation (i), 0.0);
}
}
public void testDotProduct () {
SparseVector t1 = new SparseVector (new int[] { 7 }, new double[] { 0.2 });
assertEquals (0.6, t1.dotProduct (s1), 0.00001);
assertEquals (0.6, s1.dotProduct (t1), 0.00001);
assertEquals (19.0, s1.dotProduct (s2), 0.00001);
assertEquals (19.0, s2.dotProduct (s1), 0.00001);
assertEquals (11.9, s1.dotProduct (d1), 0.00001);
assertEquals (10.1, s2.dotProduct (d1), 0.00001);
// test dotproduct when vector with more locations has a lower
// max-index than short vector
SparseVector t2 = new SparseVector (new int[] { 3, 30 }, new double[] { 0.2, 3.5 });
SparseVector t3 = new SparseVector (null, new double[] { 1, 1, 1, 1, });
assertEquals (0.2, t3.dotProduct (t2), 0.00001);
}
public void testIncrementValue ()
{
SparseVector s = (SparseVector) s1.cloneMatrix ();
s.incrementValue (5, 0.75);
double[] ans = new double[] {1, 2.75, 3, 4, 5};
for (int i = 0; i < s.numLocations(); i++) {
assertTrue (s.valueAtLocation (i) == ans[i]);
}
}
public void testSetValue ()
{
SparseVector s = (SparseVector) s1.cloneMatrix ();
s.setValue (5, 0.3);
double[] ans = new double[] {1, 0.3, 3, 4, 5};
for (int i = 0; i < s.numLocations(); i++) {
assertTrue (s.valueAtLocation (i) == ans[i]);
}
}
public void testDenseSparseVector ()
{
SparseVector svDense = new SparseVector (null, dbl3);
double sdot = svDense.dotProduct (svDense);
double ddot = d1.dotProduct (d1);
assertEquals (sdot, ddot, 0.0001);
svDense.plusEqualsSparse (s1);
checkAnswer (svDense, new double[] { 2.0, 2.5, 3.0, 5.7, 3.5,
5.6, 0, 3, 0, 0,
0, 0, 0, 4, 0,
5, });
svDense.plusEqualsSparse (s1, 2.0);
checkAnswer (svDense, new double[] { 2.0, 2.5, 3.0, 7.7, 3.5,
9.6, 0, 9, 0, 0,
0, 0, 0, 12, 0,
15, });
double[] dbl4 = new double [dbl3.length + 1];
for (int i = 0; i < dbl4.length; i++) dbl4[i] = 2.0;
SparseVector sv4 = new SparseVector (null, dbl4);
svDense.plusEqualsSparse (sv4);
checkAnswer (svDense, new double[] { 4.0, 4.5, 5.0, 9.7, 5.5,
11.6, 2.0, 11.0, 2.0, 2.0,
2, 2, 2, 14, 2.0,
17, });
}
private static int[] idx2 = { 3, 7, 12, 15, 18 };
public void testBinaryVector ()
{
SparseVector binary1 = new SparseVector (idxs, null, idxs.length, idxs.length,
false, false, false);
SparseVector binary2 = new SparseVector (idx2, null, idx2.length, idx2.length,
false, false, false);
assertEquals (3, binary1.dotProduct (binary2), 0.0001);
assertEquals (3, binary2.dotProduct (binary1), 0.0001);
assertEquals (15.0, binary1.dotProduct (s1), 0.0001);
assertEquals (15.0, s1.dotProduct (binary1), 0.0001);
assertEquals (9.0, binary2.dotProduct (s1), 0.0001);
assertEquals (9.0, s1.dotProduct (binary2), 0.0001);
SparseVector dblVec = (SparseVector) s1.cloneMatrix ();
dblVec.plusEqualsSparse (binary1);
checkAnswer (dblVec, new double[] { 2, 3, 4, 5, 6 });
SparseVector dblVec2 = (SparseVector) s1.cloneMatrix ();
dblVec2.plusEqualsSparse (binary2);
checkAnswer (dblVec2, new double[] { 2, 2, 4, 4, 6 });
}
public void testCloneMatrixZeroed ()
{
SparseVector s = (SparseVector) s1.cloneMatrixZeroed ();
for (int i = 0; i < s.numLocations(); i++) {
assertTrue (s.valueAtLocation (i) == 0.0);
assertTrue (s.indexAtLocation (i) == idxs [i]);
}
}
public void testPrint ()
{
ByteArrayOutputStream baos = new ByteArrayOutputStream ();
PrintStream out = new PrintStream (baos);
PrintStream oldOut = System.out;
System.setOut (out);
SparseVector standard = new SparseVector (idxs, dbl2);
standard.print ();
assertEquals ("SparseVector[3] = 1.0\nSparseVector[5] = 1.5\nSparseVector[7] = 2.0\nSparseVector[13] = 1.0\nSparseVector[15] = 1.0\n", baos.toString ());
baos.reset ();
SparseVector dense = new SparseVector (null, dbl2);
dense.print ();
assertEquals ("SparseVector[0] = 1.0\nSparseVector[1] = 1.5\nSparseVector[2] = 2.0\nSparseVector[3] = 1.0\nSparseVector[4] = 1.0\n", baos.toString ());
baos.reset ();
SparseVector binary = new SparseVector (idxs, null, idxs.length, idxs.length,
false, false, false);
binary.print ();
assertEquals ("SparseVector[3] = 1.0\nSparseVector[5] = 1.0\nSparseVector[7] = 1.0\nSparseVector[13] = 1.0\nSparseVector[15] = 1.0\n", baos.toString ());
baos.reset ();
}
public void testExtendedDotProduct () {
SparseVector v1 = new SparseVector (null, dbl3);
SparseVector vInf = new SparseVector (null, dbl4);
double dp = v1.dotProduct (vInf);
assertTrue (!Double.isNaN(dp));
dp = vInf.dotProduct (v1);
assertTrue (!Double.isNaN(dp));
}
public static Test suite ()
{
return new TestSuite (TestSparseVector.class);
}
protected void setUp ()
{
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 8,154 | 32.016194 | 155 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestFeatureSequence.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.types.tests;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureSequence;
import junit.framework.*;
public class TestFeatureSequence extends TestCase
{
public TestFeatureSequence (String name) {
super (name);
}
public void testNewPutSizeFreeze ()
{
Alphabet dict = new Alphabet ();
FeatureSequence fs = new FeatureSequence (dict, 10);
fs.add (dict.lookupIndex ("apple"));
fs.add (dict.lookupIndex ("bear"));
fs.add (dict.lookupIndex ("car"));
fs.add (dict.lookupIndex ("door"));
assertTrue (fs.size() == 4);
double[] weights = new double[4];
fs.addFeatureWeightsTo (weights);
assertTrue (weights[1] == 1.0);
fs.add (dict.lookupIndex ("bear"));
int[] feats = fs.toFeatureIndexSequence();
assertTrue (feats[0] == 0);
assertTrue (feats[1] == 1);
assertTrue (feats[2] == 2);
assertTrue (feats[3] == 3);
assertTrue (feats[4] == 1);
}
public static Test suite ()
{
return new TestSuite (TestFeatureSequence.class);
}
protected void setUp ()
{
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 1,641 | 24.65625 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestInstanceListWeights.java | package cc.mallet.types.tests;
import org.junit.Test;
import static org.junit.Assert.*;
import cc.mallet.types.*;
import cc.mallet.pipe.*;
public class TestInstanceListWeights {
@Test
public void setWeights() {
InstanceList instances = new InstanceList(new Noop());
Instance instance1 = new Instance("test", null, null, null);
Instance instance2 = new Instance("test", null, null, null);
Instance instance3 = new Instance("test", null, null, null);
instances.add(instance1, 10.0);
instances.add(instance2);
assertEquals("#1", instances.getInstanceWeight(0), 10.0, 0.0);
assertEquals("#2", instances.getInstanceWeight(instance1), 10.0, 0.0);
assertEquals("#3", instances.getInstanceWeight(1), 1.0, 0.0);
assertEquals("#4", instances.getInstanceWeight(instance2), 1.0, 0.0);
// Reset an existing weight
instances.setInstanceWeight(0, 1.0);
assertEquals("#5", instances.getInstanceWeight(0), 1.0, 0.0);
// Reset an existing default (and therefore not represented) weight
instances.setInstanceWeight(1, 5.0);
assertEquals("#6", instances.getInstanceWeight(1), 5.0, 0.0);
}
} | 1,174 | 29.921053 | 72 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestLabelsSequence.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types.tests;
import junit.framework.*;
import java.io.IOException;
import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.Labels;
import cc.mallet.types.LabelsSequence;
/**
* Created: Sep 21, 2004
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestLabelsSequence.java,v 1.1 2007/10/22 21:37:55 mccallum Exp $
*/
public class TestLabelsSequence extends TestCase {
public TestLabelsSequence (String name)
{
super (name);
}
public void testSerializable () throws IOException, ClassNotFoundException
{
LabelAlphabet dict = new LabelAlphabet ();
Labels lbls1 = new Labels (new Label[] {
dict.lookupLabel ("A"),
dict.lookupLabel ("B"),
});
Labels lbls2 = new Labels (new Label[] {
dict.lookupLabel ("C"),
dict.lookupLabel ("A"),
});
LabelsSequence lblseq = new LabelsSequence (new Labels[] { lbls1, lbls2 });
LabelsSequence lblseq2 = (LabelsSequence) TestSerializable.cloneViaSerialization (lblseq);
assertEquals (lblseq.size(), lblseq2.size());
assertEquals (lblseq.getLabels(0).toString(), lblseq2.getLabels(0).toString ());
assertEquals (lblseq.getLabels(1).toString(), lblseq2.getLabels(1).toString ());
}
public static Test suite ()
{
return new TestSuite (TestLabelsSequence.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestLabelsSequence (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,181 | 30.171429 | 94 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/types/tests/TestSerializable.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.types.tests;
import junit.framework.*;
import java.io.*;
/**
* Static utility for testing serializable classes in MALLET.
*
* Created: Aug 24, 2004
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestSerializable.java,v 1.1 2007/10/22 21:37:55 mccallum Exp $
*/
public class TestSerializable extends TestCase {
public TestSerializable (String name)
{
super (name);
}
public static Test suite ()
{
return new TestSuite (TestSerializable.class);
}
/**
* Clones a given object by serializing it to a byte array and reading it back.
* This is useful for testing serialization methods.
*
* @param obj
* @return A copy of obj.
* @throws IOException
* @throws ClassNotFoundException
*/
public static Object cloneViaSerialization (Serializable obj)
throws IOException, ClassNotFoundException
{
ByteArrayOutputStream boas = new ByteArrayOutputStream ();
ObjectOutputStream oos = new ObjectOutputStream (boas);
oos.writeObject (obj);
ByteArrayInputStream bias = new ByteArrayInputStream (boas.toByteArray ());
ObjectInputStream ois = new ObjectInputStream (bias);
return ois.readObject ();
}
private static class WriteMe implements Serializable {
String foo;
int bar;
public boolean equals (Object o)
{
if (this == o) return true;
if (!(o instanceof WriteMe)) return false;
final WriteMe writeMe = (WriteMe) o;
if (bar != writeMe.bar) return false;
if (foo != null ? !foo.equals (writeMe.foo) : writeMe.foo != null) return false;
return true;
}
public int hashCode ()
{
int result;
result = (foo != null ? foo.hashCode () : 0);
result = 29 * result + bar;
return result;
}
}
public void testTestSerializable () throws IOException, ClassNotFoundException
{
WriteMe w = new WriteMe ();
w.foo = "hi there";
w.bar = 1;
WriteMe w2 = (WriteMe) cloneViaSerialization (w);
assertTrue (w != w2);
assertTrue (w.equals (w2));
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestSerializable (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,898 | 26.349057 | 86 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/Clusterer.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.cluster;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.InstanceList;
/**
* An abstract class for clustering a set of points.
* @author Jerod Weinman <A HREF="mailto:[email protected]">[email protected]</A>
*/
public abstract class Clusterer implements Serializable {
Pipe instancePipe;
/**
* Creates a new <code>Clusterer</code> instance.
*
* @param instancePipe Pipe that created the InstanceList to be
* clustered.
*/
public Clusterer(Pipe instancePipe) {
this.instancePipe = instancePipe;
}
/** Return a clustering of an InstanceList */
public abstract Clustering cluster (InstanceList trainingSet);
public Pipe getPipe () { return instancePipe; }
// SERIALIZATION
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject ();
int version = in.readInt ();
}
}
| 1,699 | 28.824561 | 93 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/KMeans.java | /*
* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
* This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
* http://www.cs.umass.edu/~mccallum/mallet This software is provided under the
* terms of the Common Public License, version 1.0, as published by
* http://www.opensource.org. For further information, see the file `LICENSE'
* included with this distribution.
*/
/**
* Clusters a set of point via k-Means. The instances that are clustered are
* expected to be of the type FeatureVector.
*
* EMPTY_SINGLE and other changes implemented March 2005 Heuristic cluster
* selection implemented May 2005
*
* @author Jerod Weinman <A
* HREF="mailto:[email protected]">[email protected]</A>
* @author Mike Winter <a href =
* "mailto:[email protected]">[email protected]</a>
*
*/
package cc.mallet.cluster;
import java.util.ArrayList;
import java.util.Random;
import java.util.logging.Logger;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Metric;
import cc.mallet.types.SparseVector;
import cc.mallet.util.VectorStats;
/**
* KMeans Clusterer
*
* Clusters the points into k clusters by minimizing the total intra-cluster
* variance. It uses a given {@link Metric} to find the distance between
* {@link Instance}s, which should have {@link SparseVector}s in the data
* field.
*
*/
public class KMeans extends Clusterer {
private static final long serialVersionUID = 1L;
// Stop after movement of means is less than this
static double MEANS_TOLERANCE = 1e-2;
// Maximum number of iterations
static int MAX_ITER = 100;
// Minimum fraction of points that move
static double POINTS_TOLERANCE = .005;
/**
* Treat an empty cluster as an error condition.
*/
public static final int EMPTY_ERROR = 0;
/**
* Drop an empty cluster
*/
public static final int EMPTY_DROP = 1;
/**
* Place the single instance furthest from the previous cluster mean
*/
public static final int EMPTY_SINGLE = 2;
Random randinator;
Metric metric;
int numClusters;
int emptyAction;
ArrayList<SparseVector> clusterMeans;
private static Logger logger = Logger
.getLogger("edu.umass.cs.mallet.base.cluster.KMeans");
/**
* Construct a KMeans object
*
* @param instancePipe Pipe for the instances being clustered
* @param numClusters Number of clusters to use
* @param metric Metric object to measure instance distances
* @param emptyAction Specify what should happen when an empty cluster occurs
*/
public KMeans(Pipe instancePipe, int numClusters, Metric metric,
int emptyAction) {
super(instancePipe);
this.emptyAction = emptyAction;
this.metric = metric;
this.numClusters = numClusters;
this.clusterMeans = new ArrayList<SparseVector>(numClusters);
this.randinator = new Random();
}
/**
* Construct a KMeans object
*
* @param instancePipe Pipe for the instances being clustered
* @param numClusters Number of clusters to use
* @param metric Metric object to measure instance distances <p/> If an empty
* cluster occurs, it is considered an error.
*/
public KMeans(Pipe instancePipe, int numClusters, Metric metric) {
this(instancePipe, numClusters, metric, EMPTY_ERROR);
}
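  // Illustrative usage (added sketch; not part of the original MALLET source).
  // Assumes an InstanceList whose Instances carry SparseVector data, plus some
  // cc.mallet.types.Metric implementation -- NormalizedDotProductMetric is named
  // here only as an assumed example; any Metric can be substituted.
  //
  //   InstanceList instances = ...;   // pipe must produce SparseVector data
  //   KMeans kmeans = new KMeans(instances.getPipe(), 10,
  //                              new NormalizedDotProductMetric(), KMeans.EMPTY_DROP);
  //   Clustering clustering = kmeans.cluster(instances);
  //   ArrayList<SparseVector> means = kmeans.getClusterMeans();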
/**
* Cluster instances
*
* @param instances List of instances to cluster
*/
@Override
public Clustering cluster(InstanceList instances) {
assert (instances.getPipe() == this.instancePipe);
// Initialize clusterMeans
initializeMeansSample(instances, this.metric);
int clusterLabels[] = new int[instances.size()];
ArrayList<InstanceList> instanceClusters = new ArrayList<InstanceList>(
numClusters);
int instClust;
double instClustDist, instDist;
double deltaMeans = Double.MAX_VALUE;
double deltaPoints = (double) instances.size();
int iterations = 0;
SparseVector clusterMean;
for (int c = 0; c < numClusters; c++) {
instanceClusters.add(c, new InstanceList(instancePipe));
}
logger.info("Entering KMeans iteration");
while (deltaMeans > MEANS_TOLERANCE && iterations < MAX_ITER
&& deltaPoints > instances.size() * POINTS_TOLERANCE) {
iterations++;
deltaPoints = 0;
// For each instance, measure its distance to the current cluster
// means, and subsequently assign it to the closest cluster
      // by adding it to the corresponding instance list.
// The mean of each cluster InstanceList is then updated.
for (int n = 0; n < instances.size(); n++) {
instClust = 0;
instClustDist = Double.MAX_VALUE;
for (int c = 0; c < numClusters; c++) {
instDist = metric.distance(clusterMeans.get(c),
(SparseVector) instances.get(n).getData());
if (instDist < instClustDist) {
instClust = c;
instClustDist = instDist;
}
}
// Add to closest cluster & label it such
instanceClusters.get(instClust).add(instances.get(n));
if (clusterLabels[n] != instClust) {
clusterLabels[n] = instClust;
deltaPoints++;
}
}
deltaMeans = 0;
for (int c = 0; c < numClusters; c++) {
if (instanceClusters.get(c).size() > 0) {
clusterMean = VectorStats.mean(instanceClusters.get(c));
deltaMeans += metric.distance(clusterMeans.get(c), clusterMean);
clusterMeans.set(c, clusterMean);
instanceClusters.set(c, new InstanceList(instancePipe));
} else {
logger.info("Empty cluster found.");
switch (emptyAction) {
case EMPTY_ERROR:
return null;
case EMPTY_DROP:
logger.fine("Removing cluster " + c);
clusterMeans.remove(c);
instanceClusters.remove(c);
for (int n = 0; n < instances.size(); n++) {
assert (clusterLabels[n] != c) : "Cluster size is "
+ instanceClusters.get(c).size()
+ "+ yet clusterLabels[n] is " + clusterLabels[n];
if (clusterLabels[n] > c)
clusterLabels[n]--;
}
numClusters--;
c--; // <-- note this trickiness. bad style? maybe.
// it just means now that we've deleted the entry,
// we have to repeat the index to get the next entry.
break;
case EMPTY_SINGLE:
// Get the instance the furthest from any centroid
// and make it a new centroid.
double newCentroidDist = 0;
int newCentroid = 0;
InstanceList cacheList = null;
for (int clusters = 0; clusters < clusterMeans.size(); clusters++) {
SparseVector centroid = clusterMeans.get(clusters);
InstanceList centInstances = instanceClusters.get(clusters);
            // Don't create new empty clusters.
if (centInstances.size() <= 1)
continue;
for (int n = 0; n < centInstances.size(); n++) {
double currentDist = metric.distance(centroid,
(SparseVector) centInstances.get(n).getData());
if (currentDist > newCentroidDist) {
newCentroid = n;
newCentroidDist = currentDist;
cacheList = centInstances;
}
}
}
            if (cacheList == null) {
              logger.info("Can't find an instance to move. Exiting.");
              // Can't find an instance to move.
              return null;
            } else {
              clusterMeans.set(c, (SparseVector) cacheList.get(
                  newCentroid).getData());
            }
            // Without this break, execution would fall through to the default case
            // and abort the whole clustering run.
            break;
          default:
            return null;
}
}
}
logger.info("Iter " + iterations + " deltaMeans = " + deltaMeans);
}
if (deltaMeans <= MEANS_TOLERANCE)
logger.info("KMeans converged with deltaMeans = " + deltaMeans);
else if (iterations >= MAX_ITER)
logger.info("Maximum number of iterations (" + MAX_ITER + ") reached.");
else if (deltaPoints <= instances.size() * POINTS_TOLERANCE)
logger.info("Minimum number of points (np*" + POINTS_TOLERANCE + "="
+ (int) (instances.size() * POINTS_TOLERANCE)
+ ") moved in last iteration. Saying converged.");
return new Clustering(instances, numClusters, clusterLabels);
}
/**
* Uses a MAX-MIN heuristic to seed the initial cluster means..
*
* @param instList List of instances.
* @param metric Distance metric.
*/
private void initializeMeansSample(InstanceList instList, Metric metric) {
// InstanceList has no remove() and null instances aren't
// parsed out by most Pipes, so we have to pre-process
// here and possibly leave some instances without
// cluster assignments.
ArrayList<Instance> instances = new ArrayList<Instance>(instList.size());
for (int i = 0; i < instList.size(); i++) {
Instance ins = instList.get(i);
SparseVector sparse = (SparseVector) ins.getData();
if (sparse.numLocations() == 0)
continue;
instances.add(ins);
}
// Add next center that has the MAX of the MIN of the distances from
// each of the previous j-1 centers (idea from Andrew Moore tutorial,
// not sure who came up with it originally)
for (int i = 0; i < numClusters; i++) {
double max = 0;
int selected = 0;
for (int k = 0; k < instances.size(); k++) {
double min = Double.MAX_VALUE;
Instance ins = instances.get(k);
SparseVector inst = (SparseVector) ins.getData();
for (int j = 0; j < clusterMeans.size(); j++) {
SparseVector centerInst = clusterMeans.get(j);
double dist = metric.distance(centerInst, inst);
if (dist < min)
min = dist;
}
if (min > max) {
selected = k;
max = min;
}
}
Instance newCenter = instances.remove(selected);
clusterMeans.add((SparseVector) newCenter.getData());
}
}
/**
* Return the ArrayList of cluster means after a run of the algorithm.
*
* @return An ArrayList of Instances.
*/
public ArrayList<SparseVector> getClusterMeans() {
return this.clusterMeans;
}
}
| 10,579 | 30.117647 | 82 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/Clusterings.java | package cc.mallet.cluster;
import java.io.Serializable;
public class Clusterings implements Serializable {
private static final long serialVersionUID = 1L;
Clustering[] clusterings;
public Clusterings (Clustering[] clusterings) {
this.clusterings = clusterings;
}
public Clustering get (int i) { return clusterings[i]; }
public void set (int i, Clustering clustering) { this.clusterings[i] = clustering; }
public int size () { return clusterings.length; }
}
| 481 | 20.909091 | 85 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/Clustering.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/** A clustering of a set of points (instances).
@author Jerod Weinman <A HREF="mailto:[email protected]">[email protected]</A>
*/
package cc.mallet.cluster;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.Arrays;
import cc.mallet.types.InstanceList;
public class Clustering implements Serializable {
protected int numLabels;
protected int labels[];
protected InstanceList instances;
/** Clustering constructor.
*
* @param instances Instances that are clustered
* @param numLabels Number of clusters
* @param labels Assignment of instances to clusters; many-to-one with
* range [0,numLabels).
*/
public Clustering (InstanceList instances, int numLabels, int[] labels) {
if (instances.size() != labels.length)
throw new IllegalArgumentException("Instance list length does not match cluster labeling");
if (numLabels < 1)
throw new IllegalArgumentException("Number of labels must be strictly positive.");
for (int i = 0 ; i < labels.length ; i++)
if (labels[i] < 0 || labels[i] >= numLabels)
throw new IllegalArgumentException("Label mapping must have range [0,numLabels).");
this.instances = instances;
this.numLabels = numLabels;
this.labels = labels;
}
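  // Illustrative usage (added sketch; not part of the original MALLET source).
  // Builds a three-instance Clustering with two clusters and reads membership back.
  // Assumes imports of cc.mallet.types.Instance and cc.mallet.pipe.Noop, in the
  // style of the unit tests elsewhere in this distribution.
  //
  //   InstanceList instances = new InstanceList(new Noop());
  //   instances.add(new Instance("a", null, null, null));
  //   instances.add(new Instance("b", null, null, null));
  //   instances.add(new Instance("c", null, null, null));
  //   Clustering clustering = new Clustering(instances, 2, new int[] {0, 0, 1});
  //   int[] cluster0 = clustering.getIndicesWithLabel(0);   // {0, 1}
  //   InstanceList cluster1 = clustering.getCluster(1);     // holds instance "c"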
// GETTERS
public InstanceList getInstances () { return this.instances; }
/** Return an list of instances with a particular label. */
public InstanceList getCluster(int label) {
InstanceList cluster = new InstanceList(instances.getPipe());
for (int n=0 ; n<instances.size() ; n++)
if (labels[n] == label)
cluster.add(instances.get(n));
return cluster;
}
/** Returns an array of instance lists corresponding to clusters. */
public InstanceList[] getClusters() {
InstanceList[] clusters = new InstanceList[numLabels];
for (int c= 0 ; c<numLabels ; c++)
clusters[c] = getCluster(c);
return clusters;
}
/** Get the cluster label for a particular instance. */
public int getLabel(int index) { return labels[index]; }
public int[] getLabels() { return labels; }
public int getNumClusters() { return numLabels; }
public int getNumInstances() { return instances.size(); }
public int size (int label) {
int size = 0;
for (int i = 0; i < labels.length; i++)
if (labels[i] == label)
size++;
return size;
}
public int[] getIndicesWithLabel (int label) {
int[] indices = new int[size(label)];
int count = 0;
for (int i = 0; i < labels.length; i++)
if (labels[i] == label)
indices[count++] = i;
return indices;
}
	public boolean equals (Object o) {
		if (this == o) return true;
		if (!(o instanceof Clustering)) return false;
		Clustering c = (Clustering) o;
		return Arrays.equals(c.getLabels(), labels);
	}
public String toString () {
String result="";
result+="#Clusters: "+getNumClusters()+"\n";
for(int i=0;i<getNumClusters();i++)
{
result+="\n--CLUSTER "+i+"--";
int[] cluster=getIndicesWithLabel(i);
for(int k=0;k<cluster.length;k++)
{
result+="\n\t"+instances.get(cluster[k]).getData().toString();
}
}
return result;
}
public Clustering shallowCopy () {
int[] newLabels = new int[labels.length];
System.arraycopy(labels, 0, newLabels, 0, labels.length);
Clustering c = new Clustering(instances, numLabels, newLabels);
return c;
}
// SETTERS
/** Set the cluster label for a particular instance. */
public void setLabel(int index, int label) { labels[index] = label; }
/** Set the number of clusters */
public void setNumLabels(int n) { numLabels = n; }
// SERIALIZATION
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject ();
int version = in.readInt ();
}
}
| 4,444 | 28.633333 | 96 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/HillClimbingClusterer.java | package cc.mallet.cluster;
import java.util.LinkedList;
import cc.mallet.cluster.neighbor_evaluator.NeighborEvaluator;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.InstanceList;
/**
* A Clusterer that iteratively improves a predicted Clustering using
* a {@link NeighborEvaluator}.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see Clusterer
*/
public abstract class HillClimbingClusterer extends KBestClusterer {
protected NeighborEvaluator evaluator;
public HillClimbingClusterer(Pipe instancePipe, NeighborEvaluator evaluator) {
super(instancePipe);
this.evaluator = evaluator;
}
public NeighborEvaluator getEvaluator () { return evaluator; }
/**
* While not converged, calls <code>improveClustering</code> to modify the
* current predicted {@link Clustering}.
*
* @param instances
* @return The predicted {@link Clustering}.
*/
public Clustering cluster (InstanceList instances) {
return clusterKBest(instances, 1)[0];
}
/* (non-Javadoc)
* @see edu.umass.cs.mallet.base.cluster.KBestClusterer#clusterKBest(edu.umass.cs.mallet.base.types.InstanceList)
*/
public Clustering[] clusterKBest (InstanceList instances, int k) {
reset();
return clusterKBest(instances, Integer.MAX_VALUE, null, k);
}
/**
* While not converged, call <code>improveClustering</code> to
* modify the current predicted {@link Clustering}.
* @param instances Instances to cluster.
* @param iterations Maximum number of iterations.
* @param initialClustering Initial clustering of the Instances.
* @return The predicted {@link Clustering}
*/
public Clustering cluster (InstanceList instances, int iterations, Clustering initialClustering) {
return clusterKBest(instances, iterations, initialClustering, 1)[0];
}
/**
* Return the K most recent solutions.
* @param instances
* @param iterations
* @param initialClustering
	 * @return The k most recently accepted solutions, most recent first.
*/
public Clustering[] clusterKBest (InstanceList instances, int iterations, Clustering initialClustering, int k) {
LinkedList<Clustering> solutions = new LinkedList<Clustering>();
Clustering bestsofar = (initialClustering == null) ? initializeClustering(instances) : initialClustering;
solutions.addFirst(bestsofar);
int iter = 0;
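		// Descriptive note (added): each pass asks the subclass to improve a copy of the
		// most recent solution; only solutions that actually changed are pushed onto the
		// front of the list, and the list is capped at k entries, so index 0 always holds
		// the most recent (best-so-far) clustering.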
do {
bestsofar = improveClustering(solutions.getFirst().shallowCopy());
if (!bestsofar.equals(solutions.getFirst()))
solutions.addFirst(bestsofar);
if (solutions.size() == k + 1)
solutions.removeLast();
} while (!converged(bestsofar) && iter++ < iterations);
return solutions.toArray(new Clustering[]{});
}
/**
*
* @param clustering
* @return True if clustering is complete.
*/
public abstract boolean converged (Clustering clustering);
/**
*
* @param clustering
* @return A modified Clustering.
*/
public abstract Clustering improveClustering (Clustering clustering);
/**
*
* @param instances
* @return An initialized Clustering of these Instances.
*/
public abstract Clustering initializeClustering (InstanceList instances);
/**
* Perform any cleanup of the clustering algorithm prior to
* clustering.
*/
public abstract void reset ();
}
| 3,204 | 27.362832 | 114 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/GreedyAgglomerative.java | package cc.mallet.cluster;
import java.util.logging.Logger;
import cc.mallet.cluster.neighbor_evaluator.AgglomerativeNeighbor;
import cc.mallet.cluster.neighbor_evaluator.Neighbor;
import cc.mallet.cluster.neighbor_evaluator.NeighborEvaluator;
import cc.mallet.cluster.util.ClusterUtils;
import cc.mallet.cluster.util.PairwiseMatrix;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.util.MalletProgressMessageLogger;
/**
* Greedily merges Instances until convergence. New merges are scored
* using {@link NeighborEvaluator}.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see HillClimbingClusterer
*/
public class GreedyAgglomerative extends HillClimbingClusterer {
private static final long serialVersionUID = 1L;
private static Logger progressLogger =
MalletProgressMessageLogger.getLogger(GreedyAgglomerative.class.getName()+"-pl");
/**
* Converged when merge score is below this value.
*/
protected double stoppingThreshold;
/**
* True if should stop clustering.
*/
protected boolean converged;
/**
	 * Cache for calls to {@link NeighborEvaluator}. In some
* experiments, reduced running time by nearly half.
*/
protected PairwiseMatrix scoreCache;
/**
*
	 * @param instancePipe Pipe for each underlying {@link Instance}.
	 * @param evaluator To score potential merges.
	 * @param stoppingThreshold Clustering converges when the evaluator score is below this value.
*/
public GreedyAgglomerative (Pipe instancePipe,
NeighborEvaluator evaluator,
double stoppingThreshold) {
super(instancePipe, evaluator);
this.stoppingThreshold = stoppingThreshold;
this.converged = false;
}
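	// Illustrative usage (added sketch; not part of the original source). Mirrors the
	// flow of FirstOrderClusterExample later in this package: a classifier-backed
	// NeighborEvaluator scores candidate merges, and clustering stops once the best
	// available merge scores below the stopping threshold (0.5 here). The classifier
	// and its positive label "YES" are assumptions of the sketch.
	//
	//   ClassifyingNeighborEvaluator eval =
	//       new ClassifyingNeighborEvaluator(classifier, "YES");
	//   Clusterer clusterer = new GreedyAgglomerative(instances.getPipe(), eval, 0.5);
	//   Clustering predicted = clusterer.cluster(instances);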
/**
*
* @param instances
* @return A singleton clustering (each Instance in its own cluster).
*/
public Clustering initializeClustering (InstanceList instances) {
reset();
return ClusterUtils.createSingletonClustering(instances);
}
public boolean converged (Clustering clustering) {
return converged;
}
/**
* Reset convergence to false so a new round of clustering can begin.
*/
public void reset () {
converged = false;
scoreCache = null;
evaluator.reset();
}
/**
* For each pair of clusters, calculate the score of the {@link Neighbor}
* that would result from merging the two clusters. Choose the merge that
	 * obtains the highest score. If no merge scores above the stopping
	 * threshold, the original Clustering is returned unchanged.
	 *
	 * @param clustering
	 * @return The Clustering after the best merge, or the original Clustering if converged.
*/
public Clustering improveClustering (Clustering clustering) {
double bestScore = Double.NEGATIVE_INFINITY;
int[] toMerge = new int[]{-1,-1};
for (int i = 0; i < clustering.getNumClusters(); i++) {
for (int j = i + 1; j < clustering.getNumClusters(); j++) {
double score = getScore(clustering, i, j);
if (score > bestScore) {
bestScore = score;
toMerge[0] = i;
toMerge[1] = j;
}
}
}
converged = (bestScore < stoppingThreshold);
if (!(converged)) {
progressLogger.info("Merging " + toMerge[0] + "(" + clustering.size(toMerge[0]) +
" nodes) and " + toMerge[1] + "(" + clustering.size(toMerge[1]) +
" nodes) [" + bestScore + "] numClusters=" +
clustering.getNumClusters());
updateScoreMatrix(clustering, toMerge[0], toMerge[1]);
clustering = ClusterUtils.mergeClusters(clustering, toMerge[0], toMerge[1]);
} else {
progressLogger.info("Converged with score " + bestScore);
}
return clustering;
}
/**
*
* @param clustering
* @param i
* @param j
* @return The score for merging these two clusters.
*/
protected double getScore (Clustering clustering, int i, int j) {
if (scoreCache == null)
scoreCache = new PairwiseMatrix(clustering.getNumInstances());
int[] ci = clustering.getIndicesWithLabel(i);
int[] cj = clustering.getIndicesWithLabel(j);
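		// Descriptive note (added): a cached value of exactly 0.0 is treated as
		// "not yet computed", so the evaluator is (re)invoked in that case and the
		// resulting score is shared by every instance pair spanning the two clusters.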
if (scoreCache.get(ci[0], cj[0]) == 0.0) {
double val = evaluator.evaluate(
new AgglomerativeNeighbor(clustering,
ClusterUtils.copyAndMergeClusters(clustering, i, j),
ci, cj));
for (int ni = 0; ni < ci.length; ni++)
for (int nj = 0; nj < cj.length; nj++)
scoreCache.set(ci[ni], cj[nj], val);
}
return scoreCache.get(ci[0], cj[0]);
}
/**
* Resets the values of clusters that have been merged.
* @param clustering
* @param i
* @param j
*/
protected void updateScoreMatrix (Clustering clustering, int i, int j) {
int size = clustering.getNumInstances();
int[] ci = clustering.getIndicesWithLabel(i);
for (int ni = 0; ni < ci.length; ni++) {
for (int nj = 0; nj < size; nj++)
if (ci[ni] != nj)
scoreCache.set(ci[ni], nj, 0.0);
}
int[] cj = clustering.getIndicesWithLabel(j);
for (int ni = 0; ni < cj.length; ni++) {
for (int nj = 0; nj < size; nj++)
if (cj[ni] != nj)
scoreCache.set(cj[ni], nj, 0.0);
}
}
public String toString () {
return "class=" + this.getClass().getName() +
"\nstoppingThreshold=" + stoppingThreshold +
"\nneighborhoodEvaluator=[" + evaluator + "]";
}
}
| 5,172 | 27.899441 | 95 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/GreedyAgglomerativeByDensity.java | package cc.mallet.cluster;
import java.util.logging.Logger;
import cc.mallet.cluster.neighbor_evaluator.NeighborEvaluator;
import cc.mallet.cluster.util.ClusterUtils;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.Instance;
import cc.mallet.util.MalletProgressMessageLogger;
import gnu.trove.TIntArrayList;
/**
* Greedily merges Instances until convergence. New merges are scored
* using {@link NeighborEvaluator}.
*
* Differs from {@link GreedyAgglomerative} in that one cluster is
* created at a time. That is, nodes are added to a cluster until
* convergence. Then, a new cluster is created from the remaining
* nodes. This reduces the number of comparisons from O(n^2) to
 * O(n lg n).
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see GreedyAgglomerative
*/
public class GreedyAgglomerativeByDensity extends GreedyAgglomerative {
private static final long serialVersionUID = 1L;
private static Logger progressLogger =
MalletProgressMessageLogger.getLogger(GreedyAgglomerativeByDensity.class.getName()+"-pl");
/**
* If true, perform greedy agglomerative clustering on the clusters
* at the end of convergence. This may alleviate the greediness of
* the byDensity clustering algorithm.
*/
boolean doPostConvergenceMerges;
/**
* Integers representing the Instance indices that have not yet been placed in a cluster.
*/
TIntArrayList unclusteredInstances;
/**
* Index of an Instance in the cluster currently being created.
*/
int instanceBeingClustered;
/**
* Randomness to order instanceBeingClustered.
*/
java.util.Random random;
/**
*
	 * @param instancePipe Pipe for each underlying {@link Instance}.
	 * @param evaluator To score potential merges.
	 * @param stoppingThreshold Clustering converges when the evaluator score is below this value.
	 * @param doPostConvergenceMerges If true, perform greedy
	 * agglomerative clustering on the clusters at the end of
	 * convergence. This may alleviate the greediness of the byDensity
	 * clustering algorithm.
*/
public GreedyAgglomerativeByDensity (Pipe instancePipe,
NeighborEvaluator evaluator,
double stoppingThreshold,
boolean doPostConvergenceMerges,
java.util.Random random) {
super(instancePipe, evaluator, stoppingThreshold);
this.doPostConvergenceMerges = doPostConvergenceMerges;
this.random = random;
this.instanceBeingClustered = -1;
}
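	// Illustrative usage (added sketch; not part of the original source), following the
	// call in FirstOrderClusterExample: grow one cluster at a time around a randomly
	// chosen seed until no remaining merge scores above the stopping threshold.
	//
	//   Clusterer clusterer = new GreedyAgglomerativeByDensity(
	//       instances.getPipe(), eval, 0.5, false, new java.util.Random(1));
	//   Clustering predicted = clusterer.cluster(instances);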
public boolean converged (Clustering clustering) {
return converged;
}
/**
* Reset convergence to false and clear state so a new round of
* clustering can begin.
*/
public void reset () {
super.reset();
this.unclusteredInstances = null;
this.instanceBeingClustered = -1;
}
public Clustering improveClustering (Clustering clustering) {
if (instanceBeingClustered == -1)
sampleNextInstanceToCluster(clustering);
int clusterIndex = clustering.getLabel(instanceBeingClustered);
double bestScore = Double.NEGATIVE_INFINITY;
int clusterToMerge = -1;
int instanceToMerge = -1;
for (int i = 0; i < unclusteredInstances.size(); i++) {
int neighbor = unclusteredInstances.get(i);
int neighborCluster = clustering.getLabel(neighbor);
double score = getScore(clustering, clusterIndex, neighborCluster);
if (score > bestScore) {
bestScore = score;
clusterToMerge = neighborCluster;
instanceToMerge = neighbor;
}
}
if (bestScore < stoppingThreshold) { // Move on to next instance to cluster.
sampleNextInstanceToCluster(clustering);
if (instanceBeingClustered != -1 && unclusteredInstances.size() != 0)
return improveClustering(clustering);
else { // Converged and no more instances to cluster.
if (doPostConvergenceMerges) {
throw new UnsupportedOperationException("PostConvergenceMerges not yet implemented.");
}
converged = true;
progressLogger.info("Converged with score " + bestScore);
}
} else { // Merge and continue.
progressLogger.info("Merging " + clusterIndex + "(" + clustering.size(clusterIndex) +
" nodes) and " + clusterToMerge + "(" + clustering.size(clusterToMerge) +
" nodes) [" + bestScore + "] numClusters=" +
clustering.getNumClusters());
updateScoreMatrix(clustering, clusterIndex, clusterToMerge);
unclusteredInstances.remove(unclusteredInstances.indexOf(instanceToMerge));
clustering = ClusterUtils.mergeClusters(clustering, clusterIndex, clusterToMerge);
}
return clustering;
}
private void sampleNextInstanceToCluster (Clustering clustering) {
if (unclusteredInstances == null)
fillUnclusteredInstances(clustering.getNumInstances());
instanceBeingClustered = (unclusteredInstances.size() == 0) ? -1 :
unclusteredInstances.remove(0);
}
private void fillUnclusteredInstances (int size) {
unclusteredInstances = new TIntArrayList(size);
for (int i = 0; i < size; i++)
unclusteredInstances.add(i);
unclusteredInstances.shuffle(random);
}
public String toString () {
return "class=" + this.getClass().getName() +
"\nstoppingThreshold=" + stoppingThreshold +
"\ndoPostConvergenceMerges=" + doPostConvergenceMerges +
"\nneighborhoodEvaluator=[" + evaluator + "]";
}
}
| 5,315 | 33.076923 | 95 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/Record.java | package cc.mallet.cluster;
import gnu.trove.TIntObjectHashMap;
import java.io.Serializable;
import cc.mallet.types.Alphabet;
import cc.mallet.types.AugmentableFeatureVector;
import cc.mallet.types.FeatureVector;
public class Record implements Serializable {
private static final long serialVersionUID = 1L;
Alphabet fieldAlph;
Alphabet valueAlph;
TIntObjectHashMap<FeatureVector> field2values;
public Record (Alphabet fieldAlph, Alphabet valueAlph) {
this.fieldAlph = fieldAlph;
this.valueAlph = valueAlph;
field2values = new TIntObjectHashMap<FeatureVector>();
}
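  /**
   * Added doc note (not in the original source): each row of <code>vals</code> has the
   * form { fieldName, value1, value2, ... }; e.g. { "field1", "f1v1", "f1v2" } records
   * two values for field "field1", as in the main() example at the bottom of this file.
   */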
public Record (Alphabet fieldAlph, Alphabet valueAlph, String[][] vals) {
this(fieldAlph, valueAlph);
for (int i = 0; i < vals.length; i++) {
AugmentableFeatureVector afv = new AugmentableFeatureVector(valueAlph, false);
for (int j = 1; j < vals[i].length; j++)
afv.add(valueAlph.lookupIndex(vals[i][j]), 1.0);
field2values.put(fieldAlph.lookupIndex(vals[i][0]), afv.toFeatureVector());
}
}
public FeatureVector values (String field) {
return values(fieldAlph.lookupIndex(field));
}
public FeatureVector values (int field) {
return (FeatureVector) field2values.get(field);
}
public int value (String field) {
return value(fieldAlph.lookupIndex(field));
}
public int value (int field) {
FeatureVector fv = values(field);
return (fv == null) ? -1 : fv.indexAtLocation(0);
}
public int[] fields () { return field2values.keys(); }
public Alphabet fieldAlphabet () { return this.fieldAlph; }
public Alphabet valueAlphabet () { return this.valueAlph; }
public String toString () { return toString(true); }
public String toString (boolean oneLine) {
StringBuffer b = new StringBuffer();
int[] keys = field2values.keys();
for (int i = 0; i < keys.length; i++) {
b.append(fieldAlph.lookupObject(keys[i]) + "=");
FeatureVector v = (FeatureVector) field2values.get(keys[i]);
for (int j = 0; j < v.numLocations(); j++)
b.append(valueAlph.lookupObject(v.indexAtLocation(j)) + ",");
if (!oneLine) b.append("\n");
}
return b.toString();
}
public static void main (String[] args) {
Record r =
new Record(new Alphabet(), new Alphabet(),
new String[][] { { "field1", "f1v1", "f1v2" },
{ "field2", "f2v1" },
{ "field3", "f3v1", "f3v2", "f3v3" } });
System.out.println(r);
}
}
| 2,387 | 27.428571 | 81 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/KBestClusterer.java | package cc.mallet.cluster;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.InstanceList;
/**
* Return the K best predicted Clusterings
* @author culotta
*
*/
public abstract class KBestClusterer extends Clusterer {
public KBestClusterer(Pipe instancePipe) {
super(instancePipe);
}
public abstract Clustering[] clusterKBest(InstanceList trainingSet, int k);
}
| 378 | 17.95 | 76 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/iterator/AllPairsIterator.java | package cc.mallet.cluster.iterator;
import cc.mallet.cluster.Clustering;
import cc.mallet.cluster.neighbor_evaluator.AgglomerativeNeighbor;
import cc.mallet.cluster.util.ClusterUtils;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
/**
* Iterate over all pairs of Instances.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see NeighborIterator
*/
public class AllPairsIterator extends NeighborIterator {
int i;
int j;
InstanceList instances;
/**
*
* @param clustering True Clustering.
*/
public AllPairsIterator (Clustering clustering) {
super(clustering);
i = 0;
j = 1;
this.instances = clustering.getInstances();
}
public boolean hasNext () {
return i < instances.size() - 1;
}
public Instance next () {
AgglomerativeNeighbor neighbor =
new AgglomerativeNeighbor(clustering,
ClusterUtils.copyAndMergeInstances(clustering,
i, j),
i, j);
// Increment.
if (j + 1 == instances.size()) {
i++;
j = i + 1;
} else {
j++;
}
return new Instance(neighbor, null, null, null);
}
}
| 1,173 | 20.345455 | 66 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/iterator/NeighborIterator.java | package cc.mallet.cluster.iterator;
import java.util.Iterator;
import cc.mallet.cluster.Clustering;
import cc.mallet.types.Instance;
/**
* Sample Instances with data objects equal to {@link Neighbor}s. This
* class is mainly used to generate training Instances from a true
* {@link Clustering}.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see InstanceIterator
*/
public abstract class NeighborIterator implements Iterator<Instance> {
protected Clustering clustering;
/**
*
* @param clustering A true Clustering.
*/
public NeighborIterator (Clustering clustering) {
this.clustering = clustering;
}
protected Clustering getClustering () { return clustering; }
public void remove () { throw new IllegalStateException ("This Iterator<Instance> does not support remove()."); }
}
| 859 | 24.294118 | 114 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/iterator/PairSampleIterator.java | package cc.mallet.cluster.iterator;
import cc.mallet.cluster.Clustering;
import cc.mallet.cluster.neighbor_evaluator.AgglomerativeNeighbor;
import cc.mallet.cluster.util.ClusterUtils;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.util.Randoms;
/**
* Sample pairs of Instances.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see NeighborIterator
*/
public class PairSampleIterator extends NeighborIterator {
protected InstanceList instances;
protected Randoms random;
protected double positiveProportion;
protected int numberSamples;
protected int positiveTarget;
protected int positiveCount;
protected int totalCount;
protected int[] nonsingletonClusters;
/**
*
* @param clustering True clustering.
* @param random Source of randomness.
* @param positiveProportion Proportion of Instances that should be positive examples.
* @param numberSamples Total number of samples to generate.
*/
public PairSampleIterator (Clustering clustering,
Randoms random,
double positiveProportion,
int numberSamples) {
super(clustering);
this.random = random;
this.positiveProportion = positiveProportion;
this.numberSamples = numberSamples;
this.positiveTarget = (int)(numberSamples * positiveProportion);
this.totalCount = this.positiveCount = 0;
this.instances = clustering.getInstances();
setNonSingletons();
}
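	// Illustrative usage (added sketch; not part of the original source). As in the
	// examples elsewhere in this package, the iterator produces Instances whose data is
	// an AgglomerativeNeighbor; "neighborPipe" below stands in for whatever Pipe turns a
	// Neighbor into a labeled FeatureVector (an assumption of this sketch).
	//
	//   InstanceList trainList = new InstanceList(neighborPipe);
	//   trainList.addThruPipe(
	//       new PairSampleIterator(trueClustering, new Randoms(1), 0.5, 100));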
private void setNonSingletons () {
int c = 0;
for (int i = 0; i < clustering.getNumClusters(); i++)
if (clustering.size(i) > 1)
c++;
nonsingletonClusters = new int[c];
c = 0;
for (int i = 0; i < clustering.getNumClusters(); i++)
if (clustering.size(i) > 1)
nonsingletonClusters[c++] = i;
}
public boolean hasNext () {
return totalCount < numberSamples;
}
public Instance next () {
AgglomerativeNeighbor neighbor = null;
if (nonsingletonClusters.length>0 && ( positiveCount < positiveTarget || clustering.getNumClusters() == 1)) { //mmwick modified
positiveCount++;
int label = nonsingletonClusters[random.nextInt(nonsingletonClusters.length)];
int[] instances = clustering.getIndicesWithLabel(label);
int ii = instances[random.nextInt(instances.length)];
int ij = instances[random.nextInt(instances.length)];
while (ii == ij)
ij = instances[random.nextInt(instances.length)];
neighbor = new AgglomerativeNeighbor(clustering,
clustering,
ii, ij);
} else {
int ii = random.nextInt(instances.size());
int ij = random.nextInt(instances.size());
while (clustering.getLabel(ii) == clustering.getLabel(ij))
ij = random.nextInt(instances.size());
neighbor =
new AgglomerativeNeighbor(clustering,
ClusterUtils.copyAndMergeInstances(clustering,
ii, ij),
ii, ij);
}
totalCount++;
return new Instance(neighbor, null, null, null);
}
}
| 3,031 | 30.583333 | 130 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/iterator/ClusterSampleIterator.java | package cc.mallet.cluster.iterator;
import java.util.ArrayList;
import java.util.Iterator;
import cc.mallet.cluster.Clustering;
import cc.mallet.cluster.neighbor_evaluator.AgglomerativeNeighbor;
import cc.mallet.cluster.util.ClusterUtils;
import cc.mallet.types.Instance;
import cc.mallet.util.Randoms;
/**
* Sample clusters of Instances.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see PairSampleIterator, NeighborIterator
*/
public class ClusterSampleIterator extends PairSampleIterator {
/**
*
* @param clustering True clustering.
* @param random Source of randomness.
* @param positiveProportion Proportion of Instances that should be positive examples.
* @param numberSamples Total number of samples to generate.
*/
public ClusterSampleIterator (Clustering clustering,
Randoms random,
double positiveProportion,
int numberSamples) {
super(clustering, random, positiveProportion, numberSamples);
}
public Instance next () {
AgglomerativeNeighbor neighbor = null;
if ((positiveCount < positiveTarget || clustering.getNumClusters() == 1) && nonsingletonClusters.length > 0) {
positiveCount++;
int label = nonsingletonClusters[random.nextInt(nonsingletonClusters.length)];
int[] instances = clustering.getIndicesWithLabel(label);
int[][] clusters = sampleSplitFromArray(instances, random, 2);
neighbor = new AgglomerativeNeighbor(clustering,
clustering,
clusters);
} else {
int labeli = random.nextInt(clustering.getNumClusters());
int labelj = random.nextInt(clustering.getNumClusters());
while (labeli == labelj)
labelj = random.nextInt(clustering.getNumClusters());
neighbor =
new AgglomerativeNeighbor(clustering,
ClusterUtils.copyAndMergeClusters(clustering, labeli, labelj),
sampleFromArray(clustering.getIndicesWithLabel(labeli), random, 1),
sampleFromArray(clustering.getIndicesWithLabel(labelj), random, 1));
}
totalCount++;
return new Instance(neighbor, null, null, null);
}
/**
* Samples a subset of elements from this array.
* @param a
* @param random
* @return
*/
protected int[] sampleFromArray (int[] a, Randoms random, int minSize) {
// Sample size.
int size = Math.max(random.nextInt(a.length) + 1, minSize);
ArrayList toInclude = new ArrayList();
for (int i = 0; i < a.length; i++)
toInclude.add(new Integer(i));
while (toInclude.size() > size && (size != a.length))
toInclude.remove(random.nextInt(toInclude.size()));
int[] ret = new int[toInclude.size()];
int i = 0;
for (Iterator iter = toInclude.iterator(); iter.hasNext(); )
ret[i++] = a[((Integer)iter.next()).intValue()];
return ret;
}
/**
* Samples a two disjoint subset of elements from this array.
* @param a
* @param random
* @return
*/
protected int[][] sampleSplitFromArray (int[] a, Randoms random, int minSize) {
// Sample size.
int size = Math.max(random.nextInt(a.length) + 1, minSize);
ArrayList toInclude = new ArrayList();
for (int i = 0; i < a.length; i++)
toInclude.add(new Integer(i));
while (toInclude.size() > size && (size != a.length))
toInclude.remove(random.nextInt(toInclude.size()));
int[][] ret = new int[2][];
int size1 = Math.max(random.nextInt(toInclude.size() - 1), 1);
ret[0] = new int[size1];
ret[1] = new int[toInclude.size() - size1];
for (int i = 0; i < size1; i++)
ret[0][i] = ((Integer)toInclude.get(i)).intValue();
int nadded = 0;
for (int i = size1; i < toInclude.size(); i++)
ret[1][nadded++] = ((Integer)toInclude.get(i)).intValue();
return ret;
}
}
| 3,757 | 31.678261 | 113 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/iterator/NodeClusterSampleIterator.java | package cc.mallet.cluster.iterator;
import cc.mallet.cluster.Clustering;
import cc.mallet.cluster.neighbor_evaluator.AgglomerativeNeighbor;
import cc.mallet.cluster.util.ClusterUtils;
import cc.mallet.types.Instance;
import cc.mallet.util.Randoms;
/**
* Samples merges of a singleton cluster with another (possibly
* non-singleton) cluster.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see PairSampleIterator, NeighborIterator
*/
public class NodeClusterSampleIterator extends ClusterSampleIterator {
/**
*
* @param clustering True clustering.
* @param random Source of randomness.
* @param positiveProportion Proportion of Instances that should be positive examples.
* @param numberSamples Total number of samples to generate.
*/
public NodeClusterSampleIterator (Clustering clustering,
Randoms random,
double positiveProportion,
int numberSamples) {
super(clustering, random, positiveProportion, numberSamples);
this.random=random;
this.positiveProportion=positiveProportion;
this.numberSamples=numberSamples;
}
public Instance next () {
AgglomerativeNeighbor neighbor = null;
if (positiveCount < positiveTarget && nonsingletonClusters.length>0){ // Sample positive.
positiveCount++;
int label = nonsingletonClusters[random.nextInt(nonsingletonClusters.length)];
int[] instances = clustering.getIndicesWithLabel(label);
int[] subcluster = sampleFromArray(instances, random, 2);
int[] cluster1 = new int[]{subcluster[random.nextInt(subcluster.length)]}; // Singleton.
int[] cluster2 = new int[subcluster.length - 1];
int nadded = 0;
for (int i = 0; i < subcluster.length; i++)
if (subcluster[i] != cluster1[0])
cluster2[nadded++] = subcluster[i];
neighbor = new AgglomerativeNeighbor(clustering,
clustering,
cluster1,
cluster2);
} else { // Sample negative.
int labeli = random.nextInt(clustering.getNumClusters());
int labelj = random.nextInt(clustering.getNumClusters());
while (labeli == labelj)
labelj = random.nextInt(clustering.getNumClusters());
int[] ii = sampleFromArray(clustering.getIndicesWithLabel(labeli), random, 1);
int[] ij = sampleFromArray(clustering.getIndicesWithLabel(labelj), random, 1);
neighbor =
new AgglomerativeNeighbor(clustering,
ClusterUtils.copyAndMergeClusters(clustering,
labeli,
labelj),
ii,
new int[]{ij[random.nextInt(ij.length)]});
}
totalCount++;
return new Instance(neighbor, null, null, null);
}
}
| 2,770 | 33.6375 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/iterator/tests/TestIterators.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.cluster.iterator.tests;
import cc.mallet.cluster.Clustering;
import cc.mallet.cluster.iterator.*;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.util.Randoms;
import junit.framework.*;
/**
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see TestCase
*/
public class TestIterators extends TestCase
{
public TestIterators (String name)
{
super (name);
}
private Clustering generateClustering (InstanceList instances) {
int[] labels = new int[]{0,0,0,1,1,1,2,2,2,2};
return new Clustering(instances, 3, labels);
}
public void testEvaluators ()
{
Randoms random = new Randoms(1);
InstanceList instances = new InstanceList(random, 100, 2).subList(0,10);
System.err.println(instances.size() + " instances");
Clustering clustering = generateClustering(instances);
System.err.println("clustering=" + clustering);
System.err.println("ClusterSampleIterator");
NeighborIterator iter = new ClusterSampleIterator(clustering,
random,
0.5,
10);
while (iter.hasNext()) {
Instance instance = (Instance)iter.next();
System.err.println(instance.getData() + "\n");
}
System.err.println("\n\nPairSampleIterator");
iter = new PairSampleIterator(clustering,
random,
0.5,
10);
while (iter.hasNext()) {
Instance instance = (Instance)iter.next();
System.err.println(instance.getData() + "\n");
}
System.err.println("\n\nAllPairsIterator");
iter = new AllPairsIterator(clustering);
while (iter.hasNext()) {
Instance instance = (Instance)iter.next();
System.err.println(instance.getData() + "\n");
}
}
public static Test suite ()
{
return new TestSuite (TestIterators.class);
}
protected void setUp ()
{
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 2,430 | 26.314607 | 76 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/examples/FirstOrderClusterExample.java | package cc.mallet.cluster.examples;
import cc.mallet.classify.Classifier;
import cc.mallet.classify.MaxEntTrainer;
import cc.mallet.classify.Trial;
import cc.mallet.classify.evaluate.ConfusionMatrix;
import cc.mallet.cluster.Clusterer;
import cc.mallet.cluster.Clustering;
import cc.mallet.cluster.GreedyAgglomerativeByDensity;
import cc.mallet.cluster.evaluate.AccuracyEvaluator;
import cc.mallet.cluster.evaluate.BCubedEvaluator;
import cc.mallet.cluster.evaluate.ClusteringEvaluator;
import cc.mallet.cluster.evaluate.ClusteringEvaluators;
import cc.mallet.cluster.evaluate.MUCEvaluator;
import cc.mallet.cluster.evaluate.PairF1Evaluator;
import cc.mallet.cluster.iterator.ClusterSampleIterator;
import cc.mallet.cluster.neighbor_evaluator.AgglomerativeNeighbor;
import cc.mallet.cluster.neighbor_evaluator.ClassifyingNeighborEvaluator;
import cc.mallet.cluster.util.ClusterUtils;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.InfoGain;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.util.PropertyList;
import cc.mallet.util.Randoms;
/**
* Illustrates use of a supervised clustering method that uses
* features over clusters. Synthetic data is created where Instances
 * belong in the same cluster iff they each have a feature called
* "feature0".
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
*/
public class FirstOrderClusterExample {
Randoms random;
double noise;
public FirstOrderClusterExample () {
this.random = new Randoms(123456789);
this.noise = 0.01;
}
public void run () {
Alphabet alphabet = dictOfSize(20);
// TRAIN
Clustering training = sampleClustering(alphabet);
Pipe clusterPipe = new OverlappingFeaturePipe();
System.err.println("Training with " + training);
InstanceList trainList = new InstanceList(clusterPipe);
trainList.addThruPipe(new ClusterSampleIterator(training, random, 0.5, 100));
System.err.println("Created " + trainList.size() + " instances.");
Classifier me = new MaxEntTrainer().train(trainList);
ClassifyingNeighborEvaluator eval =
new ClassifyingNeighborEvaluator(me, "YES");
Trial trial = new Trial(me, trainList);
System.err.println(new ConfusionMatrix(trial));
InfoGain ig = new InfoGain(trainList);
ig.print();
// Clusterer clusterer = new GreedyAgglomerative(training.getInstances().getPipe(),
// eval, 0.5);
Clusterer clusterer = new GreedyAgglomerativeByDensity(training.getInstances().getPipe(),
eval, 0.5, false,
new java.util.Random(1));
// TEST
Clustering testing = sampleClustering(alphabet);
InstanceList testList = testing.getInstances();
Clustering predictedClusters = clusterer.cluster(testList);
// EVALUATE
System.err.println("\n\nEvaluating System: " + clusterer);
ClusteringEvaluators evaluators = new ClusteringEvaluators(new ClusteringEvaluator[]{
new BCubedEvaluator(),
new PairF1Evaluator(),
new MUCEvaluator(),
new AccuracyEvaluator()});
System.err.println("truth:" + testing);
System.err.println("pred: " + predictedClusters);
System.err.println(evaluators.evaluate(testing, predictedClusters));
}
/**
* Sample a InstanceList and its true clustering.
* @param alph
* @return
*/
private Clustering sampleClustering (Alphabet alph) {
InstanceList instances =
new InstanceList(random,
alph,
new String[]{"foo", "bar"},
30).subList(0, 20);
Clustering singletons = ClusterUtils.createSingletonClustering(instances);
// Merge instances that both have feature0
for (int i = 0; i < instances.size(); i++) {
FeatureVector fvi = (FeatureVector)instances.get(i).getData();
for (int j = i + 1; j < instances.size(); j++) {
FeatureVector fvj = (FeatureVector)instances.get(j).getData();
if (fvi.contains("feature0") && fvj.contains("feature0")) {
singletons = ClusterUtils.mergeClusters(singletons,
singletons.getLabel(i),
singletons.getLabel(j));
} else if (!(fvi.contains("feature0") || fvj.contains("feature0"))
&& random.nextUniform() < noise) {
// Random noise.
singletons = ClusterUtils.mergeClusters(singletons,
singletons.getLabel(i),
singletons.getLabel(j));
}
}
}
return singletons;
}
private Alphabet dictOfSize (int size) {
Alphabet ret = new Alphabet ();
for (int i = 0; i < size; i++)
ret.lookupIndex ("feature"+i);
return ret;
}
/**
* Computes a feature that indicates whether or not all members of a
* cluster have a feature named "feature0".
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see Pipe
*/
private class OverlappingFeaturePipe extends Pipe {
private static final long serialVersionUID = 1L;
public OverlappingFeaturePipe () {
super (new Alphabet(), new LabelAlphabet());
}
public Instance pipe (Instance carrier) {
boolean mergeFirst = false;
AgglomerativeNeighbor neighbor = (AgglomerativeNeighbor)carrier.getData();
Clustering original = neighbor.getOriginal();
InstanceList list = original.getInstances();
int[] mergedIndices = neighbor.getNewCluster();
boolean match = true;
for (int i = 0; i < mergedIndices.length; i++) {
for (int j = i + 1; j < mergedIndices.length; j++) {
if ((original.getLabel(mergedIndices[i]) !=
original.getLabel(mergedIndices[j])) || mergeFirst) {
FeatureVector fvi = (FeatureVector)list.get(mergedIndices[i]).getData();
FeatureVector fvj = (FeatureVector)list.get(mergedIndices[j]).getData();
if (!(fvi.contains("feature0") && fvj.contains("feature0"))) {
match = false;
break;
}
}
}
}
PropertyList pl = null;
if (match)
pl = PropertyList.add("Match", 1.0, pl);
else
pl = PropertyList.add("NoMatch", 1.0, pl);
FeatureVector fv = new FeatureVector ((Alphabet)getDataAlphabet(),
pl, true);
carrier.setData(fv);
boolean positive = true;
for (int i = 0; i < mergedIndices.length; i++) {
for (int j = i + 1; j < mergedIndices.length; j++) {
if (original.getLabel(mergedIndices[i]) != original.getLabel(mergedIndices[j])) {
positive = false;
break;
}
}
}
LabelAlphabet ldict = (LabelAlphabet)getTargetAlphabet();
String label = positive ? "YES" : "NO";
carrier.setTarget(ldict.lookupLabel(label));
return carrier;
}
}
public static void main (String[] args) {
FirstOrderClusterExample ex = new FirstOrderClusterExample();
ex.run();
}
}
| 6,893 | 32.629268 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/util/PairwiseMatrix.java | package cc.mallet.cluster.util;
import java.io.Serializable;
/**
* 2-D upper-triangular matrix. Used to store pairwise affinity
* scores.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
*/
public class PairwiseMatrix implements Serializable {
private static final long serialVersionUID = 1L;
double[][] vals;
public PairwiseMatrix (int size) {
vals = new double[size - 1][];
for (int i = 0; i < size - 1; i++) {
vals[i] = new double[size - i - 1];
for (int j = 0; j < vals[i].length; j++)
vals[i][j] = 0.0;
}
}
public void set (int i, int j, double v) {
int[] indices = sort(i, j);
vals[indices[0]][indices[1] - indices[0] - 1] = v;
}
public double get (int i, int j) {
int[] indices = sort(i, j);
return vals[indices[0]][indices[1] - indices[0] - 1];
}
public int length (int i) {
return vals[i].length;
}
private int[] sort (int i, int j) {
int[] ret = new int[2];
if (i < j) {
ret[0] = i;
ret[1] = j;
} else {
ret[0] = j;
ret[1] = i;
}
return ret;
}
public String toString () {
StringBuffer buf = new StringBuffer();
for (int i = 0; i < vals.length; i++) {
for (int j = 0; j < vals[i].length; j++) {
buf.append(vals[i][j] + " ");
}
buf.append("\n");
}
return buf.toString();
}
public static void main (String[] args) {
int size = Integer.parseInt(args[0]);
PairwiseMatrix m = new PairwiseMatrix(size);
for (int i = 0; i < size; i++)
for (int j = i + 1; j < size; j++)
m.set(i, j, Double.parseDouble(i + "" + j));
System.err.println(m);
}
}
| 1,607 | 20.44 | 63 | java |
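A minimal usage sketch for the PairwiseMatrix class above; the values are illustrative only. Because indices are sorted internally, set and get are symmetric in their arguments.
// Hypothetical usage sketch for PairwiseMatrix (values are made up).
PairwiseMatrix scores = new PairwiseMatrix(4); // pairwise scores over 4 items
scores.set(1, 3, 0.75);                        // stored once in the upper triangle
double s = scores.get(3, 1);                   // 0.75 -- same cell as (1, 3)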
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/util/ClusterUtils.java | package cc.mallet.cluster.util;
import cc.mallet.cluster.Clustering;
import cc.mallet.pipe.Noop;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.util.Randoms;
/**
* Utility functions for Clusterings.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see Clustering
*/
public class ClusterUtils {
/**
* @param li
* @param lj
* @return A new {@link InstanceList} where <code>lj</code> is appended to <code>li</code>.
*/
public static InstanceList combineLists (InstanceList li,
InstanceList lj) {
InstanceList newList = new InstanceList(li.getPipe());
for (int i = 0; i < li.size(); i++)
newList.add(li.get(i));
for (int i = 0; i < lj.size(); i++)
newList.add(lj.get(i));
return newList;
}
/**
* Relabels the clustering to reflect merging clusters i and
* j. Relabels all Instances having label <code>labelj</code> to <code>labeli</code>.
* @param clustering
* @param labeli
* @param labelj
* @return Modified Clustering.
*/
public static Clustering mergeClusters (Clustering clustering,
int labeli, int labelj) {
if (labeli == labelj)
return clustering;
// Set all labelj labels to labeli.
InstanceList instances = clustering.getInstances();
for (int i = 0; i < instances.size(); i++) {
int idx = clustering.getLabel(i);
if (idx == labelj)
clustering.setLabel(i, labeli);
}
clustering.setNumLabels(clustering.getNumClusters() - 1);
// Decrement cluster labels greater than labelj so that labels remain contiguous.
for (int i = 0; i < instances.size(); i++) {
int idx = clustering.getLabel(i);
if (idx > labelj)
clustering.setLabel(i, idx - 1);
}
return clustering;
}
/**
* Merge clusters containing the specified instances.
* @param clustering
* @param instances
* @return Modified Clustering.
*/
public static Clustering mergeInstances (Clustering clustering,
int[] instances) {
for (int i = 0; i < instances.length; i++) {
for (int j = i + 1; j < instances.length; j++) {
int labeli = clustering.getLabel(instances[i]);
int labelj = clustering.getLabel(instances[j]);
clustering = mergeClusters(clustering, labeli, labelj);
}
}
return clustering;
}
public static int[] getCombinedInstances (Clustering clustering, int i, int j) {
int[] ci = clustering.getIndicesWithLabel(i);
int[] cj = clustering.getIndicesWithLabel(j);
int[] merged = new int[ci.length + cj.length];
System.arraycopy(ci, 0, merged, 0, ci.length);
System.arraycopy(cj, 0, merged, ci.length, cj.length);
return merged;
}
public static Clustering mergeInstances (Clustering clustering,
int i, int j) {
return mergeInstances(clustering, new int[]{i, j});
}
/**
* Initializes Clustering to one Instance per cluster.
* @param instances
* @return Singleton Clustering.
*/
public static Clustering createSingletonClustering (InstanceList instances) {
int[] labels = new int[instances.size()];
for (int i = 0; i < labels.length; i++)
labels[i] = i;
return new Clustering(instances,
labels.length,
labels);
}
public static Clustering createRandomClustering (InstanceList instances,
Randoms random) {
Clustering clustering = createSingletonClustering(instances);
int numMerges = 2 + random.nextInt(instances.size() - 2);
for (int i = 0; i < numMerges; i++)
clustering = mergeInstances(clustering,
random.nextInt(instances.size()),
random.nextInt(instances.size()));
return clustering;
}
/**
*
* @param clustering
* @param indices
* @return A Clustering where no Instances in <code>indices</code>
* are in the same cluster.
*/
public static Clustering shatterInstances (Clustering clustering, int[] indices) {
for (int i = 0; i < indices.length - 1; i++) {
clustering.setLabel(indices[i], clustering.getNumClusters());
clustering.setNumLabels(clustering.getNumClusters() + 1);
}
return clustering;
}
/**
*
* @param i
* @param j
* @return A new {@link InstanceList} containing the two argument {@link Instance}s.
*/
public static InstanceList makeList (Instance i, Instance j) {
InstanceList list = new InstanceList(new Noop(i.getDataAlphabet(), i.getTargetAlphabet()));
list.add(i);
list.add(j);
return list;
}
/**
* @param clustering
* @return A shallow copy of the argument where new objects are only
* allocated for the cluster assignment.
*/
public static Clustering copyWithNewLabels (Clustering clustering) {
int[] oldLabels = clustering.getLabels();
int[] newLabels = new int[oldLabels.length];
System.arraycopy(oldLabels, 0, newLabels, 0, oldLabels.length);
return new Clustering(clustering.getInstances(),
clustering.getNumClusters(),
newLabels);
}
public static Clustering mergeInstancesWithSameLabel (Clustering clustering) {
InstanceList list = clustering.getInstances();
for (int i = 0; i < list.size(); i++) {
Instance ii = list.get(i);
int li = clustering.getLabel(i);
for (int j = i + 1; j < list.size(); j++) {
Instance ij = list.get(j);
int lj = clustering.getLabel(j);
if (li != lj && ii.getLabeling().equals(ij.getLabeling()))
clustering = ClusterUtils.mergeClusters(clustering, li, lj);
}
}
return clustering;
}
/**
*
* @param clustering
* @param i
* @param j
* @return A new copy of <code>clustering</code> in which clusters
* with labels <code>i</code> and <code>j</code> have been merged.
*/
public static Clustering copyAndMergeClusters (Clustering clustering, int i, int j) {
return mergeClusters(copyWithNewLabels(clustering), i, j);
}
/**
*
* @param clustering
* @param i
* @param j
* @return A new copy of <code>clustering</code> in which {@link
* Instance}s <code>i</code> and <code>j</code> have been put in the
* same cluster.
*/
public static Clustering copyAndMergeInstances (Clustering clustering, int i, int j) {
return copyAndMergeInstances(clustering, new int[]{i, j});
}
/**
*
* @param clustering
* @param instances
* @return A new copy of <code>clustering</code> in which the
* clusters containing the specified {@link Instance}s have been
* merged together into one cluster.
*/
public static Clustering copyAndMergeInstances (Clustering clustering, int[] instances) {
return mergeInstances(copyWithNewLabels(clustering), instances);
}
}
| 6,559 | 28.954338 | 93 | java |
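A hedged sketch of how the ClusterUtils helpers above compose; `instances` stands for any existing InstanceList and is assumed, not defined here.
// Hypothetical usage sketch: start from singletons, then merge.
Clustering c = ClusterUtils.createSingletonClustering(instances);
c = ClusterUtils.mergeInstances(c, 0, 1);          // instances 0 and 1 now share a cluster
Clustering copy = ClusterUtils.copyAndMergeClusters(c, c.getLabel(0), c.getLabel(2));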
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/clustering_scorer/PairwiseScorer.java | package cc.mallet.cluster.clustering_scorer;
import cc.mallet.cluster.Clustering;
import cc.mallet.cluster.iterator.AllPairsIterator;
import cc.mallet.cluster.neighbor_evaluator.AgglomerativeNeighbor;
import cc.mallet.cluster.neighbor_evaluator.NeighborEvaluator;
import cc.mallet.cluster.util.ClusterUtils;
import cc.mallet.types.Instance;
/**
* For each pair of Instances, if the pair is predicted to be in the same
* cluster, increment the total by the evaluator's score for merging the two.
* Else, increment by 1 - evaluator score. Divide by number of pairs.
*
* @author culotta
*
*/
public class PairwiseScorer implements ClusteringScorer {
NeighborEvaluator evaluator;
public PairwiseScorer(NeighborEvaluator evaluator) {
super();
this.evaluator = evaluator;
}
public double score(Clustering clustering) {
Clustering singletons = ClusterUtils
.createSingletonClustering(clustering.getInstances());
double total = 0;
int count = 0;
for (AllPairsIterator iter = new AllPairsIterator(singletons); iter
.hasNext(); count++) {
Instance instance = (Instance) iter.next();
AgglomerativeNeighbor neighbor = (AgglomerativeNeighbor) instance
.getData();
double score = evaluator.evaluate(neighbor);
int[][] clusters = neighbor.getOldClusters();
if (clustering.getLabel(clusters[0][0]) == clustering
.getLabel(clusters[1][0]))
total += score;
else
total += 1.0 - score;
}
return (double) total / count;
}
}
| 1,483 | 29.285714 | 77 | java |
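A short sketch of scoring a predicted Clustering with the PairwiseScorer above; `evaluator` (any NeighborEvaluator) and `predicted` are assumed to exist elsewhere.
// Hypothetical usage sketch: average pairwise agreement, higher is better.
PairwiseScorer scorer = new PairwiseScorer(evaluator);
double quality = scorer.score(predicted);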
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/clustering_scorer/ClusteringScorer.java | package cc.mallet.cluster.clustering_scorer;
import cc.mallet.cluster.Clustering;
/**
* Assign a score to a Clustering. Higher is better.
* @author culotta
*
*/
public interface ClusteringScorer {
public double score (Clustering clustering);
}
| 252 | 17.071429 | 52 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/neighbor_evaluator/Neighbor.java | package cc.mallet.cluster.neighbor_evaluator;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import cc.mallet.cluster.Clustering;
import cc.mallet.types.Alphabet;
/**
* A Clustering and a modified version of that Clustering.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
*/
public class Neighbor implements Serializable {
Clustering original;
Clustering modified;
Alphabet alphabet;
public Neighbor(Clustering original, Clustering modified) {
this.original = original;
this.modified = modified;
}
/**
*
* @return The original Clustering.
*/
public Clustering getOriginal() {
return original;
}
/**
*
* @return The modified Clustering.
*/
public Clustering getModified() {
return modified;
}
public String toString() {
return "original=" + original + "\nmodified=" + modified;
}
// SERIALIZATION
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException {
out.defaultWriteObject();
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
in.defaultReadObject();
int version = in.readInt();
}
}
| 1,372 | 19.80303 | 70 | java |
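A sketch pairing the Neighbor class above with ClusterUtils; `clustering` is an assumed existing Clustering.
// Hypothetical usage sketch: a Neighbor records a proposed change to a Clustering.
Clustering modified = ClusterUtils.copyAndMergeClusters(clustering, 0, 1);
Neighbor n = new Neighbor(clustering, modified);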
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/neighbor_evaluator/NeighborEvaluator.java | package cc.mallet.cluster.neighbor_evaluator;
/**
* Scores the value of changing the current {@link Clustering} to the
* modified {@link Clustering} specified in a {@link Neighbor} object.
*
* A common implementation of this interface uses a {@link Classifier}
* to assign a score to a {@link Neighbor}.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
*/
public interface NeighborEvaluator {
/**
*
* @param neighbor
* @return A higher score indicates that the modified Clustering is preferred.
*/
public double evaluate (Neighbor neighbor);
/**
*
* @param neighbors
* @return One score per neighbor. A higher score indicates that the
* modified Clustering is preferred.
*
*/
public double[] evaluate (Neighbor[] neighbors);
/**
* Reset the state of the evaluator.
*/
public void reset ();
}
| 874 | 22.026316 | 79 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/neighbor_evaluator/RandomEvaluator.java | package cc.mallet.cluster.neighbor_evaluator;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import cc.mallet.util.Randoms;
/**
* Randomly scores {@link Neighbor}s.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see NeighborEvaluator
*/
public class RandomEvaluator implements NeighborEvaluator, Serializable {
Randoms random;
public RandomEvaluator (Randoms random) {
this.random = random;
}
/**
*
* @param neighbor
* @return A higher score indicates that the modified Clustering is preferred.
*/
public double evaluate (Neighbor neighbor) {
return random.nextUniform(0, 1);
}
/**
*
* @param neighbors
* @return One score per neighbor. A higher score indicates that the
* modified Clustering is preferred.
*
*/
public double[] evaluate (Neighbor[] neighbors) {
double[] scores = new double[neighbors.length];
for (int i = 0; i < neighbors.length; i++)
scores[i] = evaluate(neighbors[i]);
return scores;
}
/**
* Reset the state of the evaluator.
*/
public void reset () {}
// SERIALIZATION
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject ();
int version = in.readInt ();
}
}
| 1,603 | 21.914286 | 93 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/neighbor_evaluator/PairwiseEvaluator.java | package cc.mallet.cluster.neighbor_evaluator;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.ArrayList;
import cc.mallet.classify.Classifier;
import cc.mallet.cluster.Clustering;
import cc.mallet.cluster.util.PairwiseMatrix;
import cc.mallet.types.MatrixOps;
/**
* Uses a {@link Classifier} over pairs of {@link Instance}s to score a
* {@link Neighbor}. Currently only supports {@link
* AgglomerativeNeighbor}s.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see ClassifyingNeighborEvaluator
*/
public class PairwiseEvaluator extends ClassifyingNeighborEvaluator {
private static final long serialVersionUID = 1L;
/**
* How to combine a set of pairwise scores (e.g. mean, max, ...).
*/
CombiningStrategy combiningStrategy;
/**
* If true, score all edges involved in a merge. If false, only
* score the edges that cross the boundaries of the clusters being
* merged.
*/
boolean mergeFirst;
/**
* Cache for calls to getScore. In some experiments, reduced running
* time by nearly half.
*/
PairwiseMatrix scoreCache;
/**
*
* @param classifier Classifier to assign scores to {@link
* Neighbor}s for which a pair of Instances has been merged.
* @param scoringLabel The predicted label that corresponds to a
* positive example (e.g. "YES").
* @param combiningStrategy How to combine the pairwise scores
* (e.g. max, mean, ...).
* @param mergeFirst If true, score all edges involved in a
* merge. If false, only score the edges that cross the boundaries
* of the clusters being merged.
*/
public PairwiseEvaluator (Classifier classifier,
String scoringLabel,
CombiningStrategy combiningStrategy,
boolean mergeFirst) {
super(classifier, scoringLabel);
this.combiningStrategy = combiningStrategy;
this.mergeFirst = mergeFirst;
}
public double[] evaluate (Neighbor[] neighbors) {
double[] scores = new double[neighbors.length];
for (int i = 0; i < neighbors.length; i++)
scores[i] = evaluate(neighbors[i]);
return scores;
}
public double evaluate (Neighbor neighbor) {
if (!(neighbor instanceof AgglomerativeNeighbor))
throw new IllegalArgumentException("Expect AgglomerativeNeighbor not " + neighbor.getClass().getName());
AgglomerativeNeighbor aneighbor = (AgglomerativeNeighbor) neighbor;
Clustering original = neighbor.getOriginal();
// int[] mergedIndices = ((AgglomerativeNeighbor)neighbor).getNewCluster();
int[] cluster1 = aneighbor.getOldClusters()[0];
int[] cluster2 = aneighbor.getOldClusters()[1];
ArrayList<Double> scores = new ArrayList<Double>();
for (int i = 0; i < cluster1.length; i++) // Between cluster scores.
for (int j = 0; j < cluster2.length; j++) {
AgglomerativeNeighbor pwneighbor =
new AgglomerativeNeighbor(original, original, cluster1[i], cluster2[j]);
scores.add(new Double(getScore(pwneighbor)));
}
if (mergeFirst) { // Also add w/in cluster scores.
for (int i = 0; i < cluster1.length; i++)
for (int j = i + 1; j < cluster1.length; j++) {
AgglomerativeNeighbor pwneighbor =
new AgglomerativeNeighbor(original, original, cluster1[i], cluster1[j]);
scores.add(new Double(getScore(pwneighbor)));
}
for (int i = 0; i < cluster2.length; i++)
for (int j = i + 1; j < cluster2.length; j++) {
AgglomerativeNeighbor pwneighbor =
new AgglomerativeNeighbor(original, original, cluster2[i], cluster2[j]);
scores.add(new Double(getScore(pwneighbor)));
}
}
// XXX This breaks during training if original cluster does not agree with mergedIndices.
// for (int i = 0; i < mergedIndices.length; i++) {
// for (int j = i + 1; j < mergedIndices.length; j++) {
// if ((original.getLabel(mergedIndices[i]) != original.getLabel(mergedIndices[j])) || mergeFirst) {
// AgglomerativeNeighbor pwneighbor =
// new AgglomerativeNeighbor(original, original,
// mergedIndices[i], mergedIndices[j]);
// scores.add(new Double(getScore(pwneighbor)));
// }
// }
// }
if (scores.size() < 1)
throw new IllegalStateException("No pairs of Instances were scored.");
double[] vals = new double[scores.size()];
for (int i = 0; i < vals.length; i++)
vals[i] = ((Double)scores.get(i)).doubleValue();
return combiningStrategy.combine(vals);
}
public void reset () {
scoreCache = null;
}
public String toString () {
return "class=" + this.getClass().getName() +
" classifier=" + classifier.getClass().getName();
}
private double getScore (AgglomerativeNeighbor pwneighbor) {
if (scoreCache == null)
scoreCache = new PairwiseMatrix(pwneighbor.getOriginal().getNumInstances());
int[] indices = pwneighbor.getNewCluster();
if (scoreCache.get(indices[0], indices[1]) == 0.0) {
scoreCache.set(indices[0], indices[1],
classifier.classify(pwneighbor).getLabelVector().value(scoringLabel));
}
return scoreCache.get(indices[0], indices[1]);
}
/**
* Specifies how to combine a set of pairwise scores into a
* cluster-wise score.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
*/
public static interface CombiningStrategy {
public double combine (double[] scores);
}
public static class Average implements CombiningStrategy, Serializable {
public double combine (double[] scores) {
return MatrixOps.mean(scores);
}
// SERIALIZATION
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException {
out.defaultWriteObject();
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
in.defaultReadObject();
int version = in.readInt();
}
}
public static class Minimum implements CombiningStrategy, Serializable {
public double combine (double[] scores) {
return MatrixOps.min(scores);
}
// SERIALIZATION
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException {
out.defaultWriteObject();
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
in.defaultReadObject();
int version = in.readInt();
}
}
public static class Maximum implements CombiningStrategy, Serializable {
public double combine (double[] scores) {
return MatrixOps.max(scores);
}
// SERIALIZATION
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException {
out.defaultWriteObject();
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
in.defaultReadObject();
int version = in.readInt();
}
}
}
| 7,182 | 30.643172 | 108 | java |
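A construction sketch for the PairwiseEvaluator above; `pairwiseClassifier` is an assumed, already-trained Classifier whose positive label is "YES", and `neighbor` is an assumed AgglomerativeNeighbor.
// Hypothetical usage sketch: combine pairwise "YES" scores by taking their mean.
PairwiseEvaluator eval = new PairwiseEvaluator(pairwiseClassifier, "YES",
                                               new PairwiseEvaluator.Average(), false);
double score = eval.evaluate(neighbor);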
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/neighbor_evaluator/RankingNeighborEvaluator.java | package cc.mallet.cluster.neighbor_evaluator;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import cc.mallet.classify.Classifier;
import cc.mallet.types.Instance;
import cc.mallet.types.LabelVector;
/**
* Uses a {@link Classifier} that scores an array of {@link
* Neighbor}s. The Classifier expects {@link Instance}s with data
* equal to an array of {@link Neighbor}s. The labeling of each
* Instance is a set of {@link Integer}s, with labeling i
* corresponding to the likelihood that {@link Neighbor} i is the "best"
* {@link Neighbor}.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see ClassifyingNeighborEvaluator
*/
public class RankingNeighborEvaluator extends ClassifyingNeighborEvaluator {
/**
*
* @param classifier The Classifier used to assign a score to a {@link Neighbor}.
*/
public RankingNeighborEvaluator (Classifier classifier) {
super(classifier, null);
}
public double evaluate (Neighbor neighbor) {
throw new UnsupportedOperationException("This class expects an array of Neighbors to choose from");
}
/**
*
* @param neighbors
* @return An array containing a score for each of the elements of <code>neighbors</code>.
*/
public double[] evaluate (Neighbor[] neighbors) {
double[] scores = new double[neighbors.length];
LabelVector ranks = classifier.classify(neighbors).getLabelVector();
for (int i = 0; i < ranks.numLocations(); i++) {
int idx = ((Integer)ranks.getLabelAtRank(i).getEntry()).intValue();
scores[idx] = ranks.getValueAtRank(i);
}
return scores;
}
public void reset () { }
public String toString () {
return "class=" + this.getClass().getName() +
" classifier=" + classifier.getClass().getName();
}
// SERIALIZATION
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject ();
int version = in.readInt ();
}
}
| 2,251 | 28.246753 | 101 | java |
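A sketch of using the RankingNeighborEvaluator above to pick the best of several candidate merges; `rankingClassifier` and the `candidates` array of Neighbors are assumed.
// Hypothetical usage sketch: score all candidates at once, keep the highest.
NeighborEvaluator eval = new RankingNeighborEvaluator(rankingClassifier);
double[] scores = eval.evaluate(candidates); // one score per candidate Neighbor
int best = 0;
for (int i = 1; i < scores.length; i++)
  if (scores[i] > scores[best])
    best = i;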
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/neighbor_evaluator/ClassifyingNeighborEvaluator.java | package cc.mallet.cluster.neighbor_evaluator;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import cc.mallet.classify.Classifier;
/**
* A {@link NeighborEvaluator} that is backed by a {@link
* Classifier}. The score for a {@link Neighbor} is the Classifier's
* predicted value for the label corresponding to <code>scoringLabel</code>.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
* @see NeighborEvaluator
*/
public class ClassifyingNeighborEvaluator implements NeighborEvaluator, Serializable {
/**
* The Classifier used to assign a score to each {@link Neighbor}.
*/
Classifier classifier;
/**
* The label corresponding to a positive instance (e.g. "YES").
*/
String scoringLabel;
/**
*
* @param classifier The Classifier used to assign a score to each {@link Neighbor}.
* @param scoringLabel The label corresponding to a positive instance (e.g. "YES").
*/
public ClassifyingNeighborEvaluator (Classifier classifier,
String scoringLabel) {
this.classifier = classifier;
this.scoringLabel = scoringLabel;
}
/**
*
* @return The classifier.
*/
public Classifier getClassifier () { return classifier; }
public double evaluate (Neighbor neighbor) {
return classifier.classify(neighbor).getLabelVector().value(scoringLabel);
}
public double[] evaluate (Neighbor[] neighbors) {
double[] scores = new double[neighbors.length];
for (int i = 0; i < neighbors.length; i++)
scores[i] = evaluate(neighbors[i]);
return scores;
}
public void reset () {
}
public String toString () {
return "class=" + this.getClass().getName() +
" classifier=" + classifier.getClass().getName() +
" scoringLabel=" + scoringLabel;
}
// SERIALIZATION
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
in.defaultReadObject ();
int version = in.readInt ();
}
}
| 2,281 | 25.229885 | 93 | java |
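A minimal sketch for the ClassifyingNeighborEvaluator above; `classifier` and `neighbor` are assumed.
// Hypothetical usage sketch: the score is the classifier's confidence in "YES".
NeighborEvaluator eval = new ClassifyingNeighborEvaluator(classifier, "YES");
double score = eval.evaluate(neighbor);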
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/neighbor_evaluator/AgglomerativeNeighbor.java | package cc.mallet.cluster.neighbor_evaluator;
import cc.mallet.cluster.Clustering;
import cc.mallet.util.ArrayUtils;
/**
* A {@link Neighbor} created by merging two clusters of the original
* Clustering.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
*/
public class AgglomerativeNeighbor extends Neighbor {
private static final long serialVersionUID = 1L;
/**
* Instance indices in the new, merged cluster.
*/
int[] newCluster;
/**
* Instance indices in the old, pre-merged clusters.
*/
int[][] oldClusters;
/**
*
* @param original
* @param modified
* @param oldClusters Instance indices of the two clusters being merged,
* one array per cluster.
*/
public AgglomerativeNeighbor (Clustering original,
Clustering modified,
int[][] oldClusters) {
super(original, modified);
if (oldClusters.length != 2)
throw new IllegalArgumentException("Agglomerations of more than 2 clusters not yet implemented.");
this.oldClusters = oldClusters;
this.newCluster = ArrayUtils.append(oldClusters[0], oldClusters[1]);
}
public AgglomerativeNeighbor (Clustering original,
Clustering modified,
int[] oldCluster1, int[] oldCluster2) {
this(original, modified, new int[][]{oldCluster1, oldCluster2});
}
public AgglomerativeNeighbor (Clustering original,
Clustering modified,
int oldCluster1, int oldCluster2) {
this(original, modified, new int[][]{{oldCluster1}, {oldCluster2}});
}
public int[] getNewCluster () { return newCluster; }
public int[][] getOldClusters () { return oldClusters; }
public String toString () {
String ret = super.toString() + "\nnewcluster=";
for (int i = 0; i < newCluster.length; i++)
ret += newCluster[i] + " ";
return ret;
}
}
| 1,906 | 26.637681 | 101 | java |
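A sketch of building the AgglomerativeNeighbor above for a proposed merge of instances 2 and 5; `clustering` is assumed.
// Hypothetical usage sketch: record the merge of the clusters holding instances 2 and 5.
Clustering merged = ClusterUtils.copyAndMergeInstances(clustering, 2, 5);
AgglomerativeNeighbor n = new AgglomerativeNeighbor(clustering, merged, 2, 5);
int[] newCluster = n.getNewCluster(); // {2, 5}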
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/neighbor_evaluator/MedoidEvaluator.java | package cc.mallet.cluster.neighbor_evaluator;
//import weka.core.Instances;
import cc.mallet.classify.Classifier;
import cc.mallet.cluster.Clustering;
import cc.mallet.cluster.util.PairwiseMatrix;
import cc.mallet.types.MatrixOps;
/**
* Uses a {@link Classifier} over pairs of {@link Instance}s to score a
* {@link Neighbor}. Currently only supports {@link
* AgglomerativeNeighbor}s.
*
* @author "Michael Wick" <[email protected]>
* @version 1.0
* @since 1.0
* @see ClassifyingNeighborEvaluator
*/
public class MedoidEvaluator extends ClassifyingNeighborEvaluator {
private static final long serialVersionUID = 1L;
/**
* If single link is true, then the score of clusters A and B is the score of the link between the two medoids.
*/
boolean singleLink=false;
/**
* How to combine a set of pairwise scores (e.g. mean, max, ...); currently not used by this class.
*/
CombiningStrategy combiningStrategy;
/**
* If true, score all edges involved in a merge. If false, only
* score the edges that cross the boundaries of the clusters being
* merged.
*/
boolean mergeFirst=true;
/**
* Cache for calls to getScore. In some experiments, reduced running
* time by nearly half.
*/
PairwiseMatrix scoreCache;
/**
*
* @param classifier Classifier to assign scores to {@link
* Neighbor}s for which a pair of Instances has been merged.
* @param scoringLabel The predicted label that corresponds to a
* positive example (e.g. "YES").
*/
public MedoidEvaluator(Classifier classifier, String scoringLabel)
{
super(classifier,scoringLabel);
System.out.println("Using Medoid Evaluator");
}
public MedoidEvaluator(Classifier classifier, String scoringLabel,boolean singleLink,boolean mergeFirst)
{
super(classifier,scoringLabel);
this.singleLink=singleLink;
this.mergeFirst=mergeFirst;
System.out.println("Using Medoid Evaluator. Single link="+singleLink+".");
}
/*
public MedoidEvaluator (Classifier classifier,
String scoringLabel,
CombiningStrategy combiningStrategy,
boolean mergeFirst) {
super(classifier, scoringLabel);
this.combiningStrategy = combiningStrategy;
this.mergeFirst = mergeFirst;
System.out.println("Using Centroid Evaluator (2)");
}
*/
public double[] evaluate (Neighbor[] neighbors) {
double[] scores = new double[neighbors.length];
for (int i = 0; i < neighbors.length; i++)
scores[i] = evaluate(neighbors[i]);
return scores;
}
public double evaluate(Neighbor neighbor)
{
int result[] = new int[2];
if (!(neighbor instanceof AgglomerativeNeighbor))
throw new IllegalArgumentException("Expect AgglomerativeNeighbor not " + neighbor.getClass().getName());
int[][] oldIndices = ((AgglomerativeNeighbor)neighbor).getOldClusters();
int[] mergedIndices=((AgglomerativeNeighbor)neighbor).getNewCluster();
Clustering original = neighbor.getOriginal();
result[0]=getCentroid(oldIndices[0],original);
result[1]=getCentroid(oldIndices[1],original);
if(singleLink) //scores a cluster based on link between medoid of each cluster
{
AgglomerativeNeighbor pwn = new AgglomerativeNeighbor(original,original,oldIndices[0][result[0]],oldIndices[1][result[1]]);
double score = getScore(pwn);
return score;
}
//
// Return a weighted average of pairwise scores, with weights proportional to similarity to each cluster's medoid.
double[] medsA=getMedWeights(result[0],oldIndices[0],original);
double[] medsB=getMedWeights(result[1],oldIndices[1],original);
double numerator=0;
double denominator=0;
for(int i=0;i<oldIndices[0].length;i++)
{
//
//cross-boundary
for(int j=0;j<oldIndices[1].length;j++)
{
AgglomerativeNeighbor pwn = new AgglomerativeNeighbor(original,original,oldIndices[0][i],oldIndices[1][j]);
double interScore=getScore(pwn);
numerator+=interScore*medsA[i]*medsB[j];
denominator+=medsA[i]*medsB[j];
}
//
//intra-cluster1
if(mergeFirst)
{
for(int j=i+1;j<oldIndices[0].length;j++)
{
AgglomerativeNeighbor pwn = new AgglomerativeNeighbor(original,original,oldIndices[0][i],oldIndices[0][j]);
double interScore=getScore(pwn);
numerator+=interScore*medsA[i]*medsA[j];
denominator+=medsA[i]*medsA[j];
}
}
}
//
//intra-cluster2
if(mergeFirst)
{
for(int i=0;i<oldIndices[1].length;i++)
{
for(int j=i+1;j<oldIndices[1].length;j++)
{
AgglomerativeNeighbor pwn = new AgglomerativeNeighbor(original,original,oldIndices[1][i],oldIndices[1][j]);
double interScore=getScore(pwn);
numerator+=interScore*medsB[i]*medsB[j];
denominator+=medsB[i]*medsB[j];
}
}
}
return numerator/denominator;
}
private double[] getMedWeights(int medIdx,int[] indices,Clustering original)
{
double result[] = new double[indices.length];
for(int i=0;i<result.length;i++)
{
if(medIdx==i)
result[i]=1;
else
{
AgglomerativeNeighbor an = new AgglomerativeNeighbor(original,original,indices[medIdx],indices[i]);
result[i] = getScore(an);
}
}
return result;
}
//
// A better strategy would use caching to incrementally determine the centroid.
private int getCentroid(int[] indices,Clustering original)
{
if(indices.length<2)
return 0;
//return indices[0];
double centDist=Double.NEGATIVE_INFINITY;
int centIdx=-1;
double[] scores = new double[indices.length];
for(int i=0;i<indices.length;i++)
{
double acc=0;
for(int k=0;k<indices.length;k++)
{
if(i==k) continue; // skip self-pairs so every other element contributes to the average
AgglomerativeNeighbor pwn = new AgglomerativeNeighbor(original,original,indices[i],indices[k]);
double score=getScore(pwn);
acc+=score;
//scores[i] = getScore(pwn);
}
acc/=(indices.length-1);
scores[i]=acc;
}
for(int i=0;i<scores.length;i++)
{
if(scores[i]>centDist)
{
centDist=scores[i];
centIdx=i;
//centIdx=indices[i];
}
}
return centIdx;
}
/*
public double evaluate (Neighbor neighbor) {
if (!(neighbor instanceof AgglomerativeNeighbor))
throw new IllegalArgumentException("Expect AgglomerativeNeighbor not " + neighbor.getClass().getName());
Clustering original = neighbor.getOriginal();
int[] mergedIndices = ((AgglomerativeNeighbor)neighbor).getNewCluster();
ArrayList scores = new ArrayList();
for (int i = 0; i < mergedIndices.length; i++) {
for (int j = i + 1; j < mergedIndices.length; j++) {
if ((original.getLabel(mergedIndices[i]) != original.getLabel(mergedIndices[j])) || mergeFirst) {
AgglomerativeNeighbor pwneighbor =
new AgglomerativeNeighbor(original, original,
mergedIndices[i], mergedIndices[j]);
scores.add(new Double(getScore(pwneighbor)));
}
}
}
if (scores.size() < 1)
throw new IllegalStateException("No pairs of Instances were scored.");
double[] vals = new double[scores.size()];
for (int i = 0; i < vals.length; i++)
vals[i] = ((Double)scores.get(i)).doubleValue();
return combiningStrategy.combine(vals);
}
*/
public void reset () {
scoreCache = null;
}
public String toString () {
return "class=" + this.getClass().getName() +
" classifier=" + classifier.getClass().getName();
}
private double getScore (AgglomerativeNeighbor pwneighbor) {
if (scoreCache == null)
scoreCache = new PairwiseMatrix(pwneighbor.getOriginal().getNumInstances());
int[] indices = pwneighbor.getNewCluster();
if (scoreCache.get(indices[0], indices[1]) == 0.0) {
scoreCache.set(indices[0], indices[1],
classifier.classify(pwneighbor).getLabelVector().value(scoringLabel));
}
return scoreCache.get(indices[0], indices[1]);
}
/**
* Specifies how to combine a set of pairwise scores into a
* cluster-wise score.
*
* @author "Aron Culotta" <[email protected]>
* @version 1.0
* @since 1.0
*/
public static interface CombiningStrategy {
public double combine (double[] scores);
}
public static class Average implements CombiningStrategy {
public double combine (double[] scores) {
return MatrixOps.mean(scores);
}
}
public static class Minimum implements CombiningStrategy {
public double combine (double[] scores) {
return MatrixOps.min(scores);
}
}
public static class Maximum implements CombiningStrategy {
public double combine (double[] scores) {
return MatrixOps.max(scores);
}
}
}
| 8,715 | 28.248322 | 125 | java |
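A construction sketch for the MedoidEvaluator above; `pairwiseClassifier` is an assumed trained Classifier whose positive label is "YES".
// Hypothetical usage sketch: the single-link variant scores a merge by the
// link between the two cluster medoids only.
MedoidEvaluator eval = new MedoidEvaluator(pairwiseClassifier, "YES", true, true);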
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/tui/Text2Clusterings.java | package cc.mallet.cluster.tui;
import gnu.trove.TIntArrayList;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.logging.Logger;
import cc.mallet.cluster.Clustering;
import cc.mallet.cluster.Clusterings;
import cc.mallet.cluster.Record;
import cc.mallet.pipe.Noop;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.iterator.FileIterator;
import cc.mallet.types.Alphabet;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.util.CommandOption;
import cc.mallet.util.MalletLogger;
//In progress
public class Text2Clusterings {
private static Logger logger =
MalletLogger.getLogger(Text2Clusterings.class.getName());
public static void main (String[] args) throws IOException {
CommandOption
.setSummary(Text2Clusterings.class,
"A tool to convert a list of text files to a Clusterings.");
CommandOption.process(Text2Clusterings.class, args);
if (classDirs.value.length == 0) {
logger
.warning("You must include --input DIR1 DIR2 ...' in order to specify a"
+ "list of directories containing the documents for each class.");
System.exit(-1);
}
Clustering[] clusterings = new Clustering[classDirs.value.length];
int fi = 0;
for (int i = 0; i < classDirs.value.length; i++) {
Alphabet fieldAlph = new Alphabet();
Alphabet valueAlph = new Alphabet();
File directory = new File(classDirs.value[i]);
File[] subdirs = getSubDirs(directory);
Alphabet clusterAlph = new Alphabet();
InstanceList instances = new InstanceList(new Noop());
TIntArrayList labels = new TIntArrayList();
for (int j = 0; j < subdirs.length; j++) {
ArrayList<File> records = new FileIterator(subdirs[j]).getFileArray();
int label = clusterAlph.lookupIndex(subdirs[j].toString());
for (int k = 0; k < records.size(); k++) {
if (fi % 100 == 0) System.out.print(fi);
else if (fi % 10 == 0) System.out.print(".");
if (fi % 1000 == 0 && fi > 0) System.out.println();
System.out.flush();
fi++;
File record = records.get(k);
labels.add(label);
instances.add(new Instance(new Record(fieldAlph, valueAlph, parseFile(record)),
new Integer(label), record.toString(),
record.toString()));
}
}
clusterings[i] =
new Clustering(instances, subdirs.length, labels.toNativeArray());
}
logger.info("\nread " + fi + " objects in " + clusterings.length + " clusterings.");
try {
ObjectOutputStream oos =
new ObjectOutputStream(new FileOutputStream(outputFile.value));
oos.writeObject(new Clusterings(clusterings));
oos.close();
} catch (Exception e) {
logger.warning("Exception writing clustering to file " + outputFile.value
+ " " + e);
e.printStackTrace();
}
}
public static File[] getSubDirs (File dir) throws IOException {
ArrayList<File> ret = new ArrayList<File>();
File[] fs = dir.listFiles();
for (File f : fs)
if (f.isDirectory() && !f.getName().matches("^\\.+$"))
ret.add(f);
return ret.toArray(new File[] {});
}
public static String[][] parseFile (File f) throws IOException {
BufferedReader r = new BufferedReader(new FileReader(f));
String line = "";
ArrayList<String[]> lines = new ArrayList<String[]>();
while ((line = r.readLine()) != null) {
line = line.trim();
String[] words = line.split("\\s+");
if (words.length > 1)
lines.add(words);
}
String[][] ret = new String[lines.size()][];
for (int i = 0; i < lines.size(); i++)
ret[i] = lines.get(i);
return ret;
}
static CommandOption.SpacedStrings classDirs =
new CommandOption.SpacedStrings(
Text2Clusterings.class,
"input",
"DIR...",
true,
null,
"The directories containing text files to be clustered, one directory per clustering",
null);
static CommandOption.String outputFile =
new CommandOption.String(Text2Clusterings.class, "output", "FILENAME",
true, "text.clusterings",
"The filename to write the Clustering.", null);
}
| 4,345 | 31.676692 | 105 | java |
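A hedged sketch of invoking the Text2Clusterings tool above programmatically; the directory names and output filename are placeholders, and the call must be made from code that handles the declared IOException.
// Hypothetical usage sketch, equivalent to the command-line flags
//   --input clustering1/ clustering2/ --output text.clusterings
Text2Clusterings.main(new String[] {
    "--input", "clustering1/", "clustering2/",
    "--output", "text.clusterings" });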
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/cluster/tui/Clusterings2Info.java | package cc.mallet.cluster.tui;
import java.io.FileInputStream;
import java.io.ObjectInputStream;
import java.util.logging.Logger;
import cc.mallet.cluster.Clustering;
import cc.mallet.cluster.Clusterings;
import cc.mallet.types.InstanceList;
import cc.mallet.util.CommandOption;
import cc.mallet.util.MalletLogger;
//In progress
public class Clusterings2Info {
private static Logger logger =
MalletLogger.getLogger(Clusterings2Info.class.getName());
public static void main (String[] args) {
CommandOption
.setSummary(Clusterings2Info.class,
"A tool to print statistics about a Clusterings.");
CommandOption.process(Clusterings2Info.class, args);
Clusterings clusterings = null;
try {
ObjectInputStream iis =
new ObjectInputStream(new FileInputStream(inputFile.value));
clusterings = (Clusterings) iis.readObject();
} catch (Exception e) {
System.err.println("Exception reading clusterings from "
+ inputFile.value + " " + e);
e.printStackTrace();
}
if (printOption.value) {
for (int i = 0; i < clusterings.size(); i++) {
Clustering c = clusterings.get(i);
for (int j = 0; j < c.getNumClusters(); j++) {
InstanceList cluster = c.getCluster(j);
for (int k = 0; k < cluster.size(); k++) {
System.out.println("clustering " + i + " cluster " + j + " element " + k + " " + cluster.get(k).getData());
}
System.out.println();
}
}
}
logger.info("number clusterings=" + clusterings.size());
int totalInstances = 0;
int totalClusters = 0;
for (int i = 0; i < clusterings.size(); i++) {
Clustering c = clusterings.get(i);
totalClusters += c.getNumClusters();
totalInstances += c.getNumInstances();
}
logger.info("total instances=" + totalInstances);
logger.info("total clusters=" + totalClusters);
logger.info("instances per clustering=" + (double) totalInstances
/ clusterings.size());
logger.info("instances per cluster=" + (double) totalInstances
/ totalClusters);
logger.info("clusters per clustering=" + (double) totalClusters
/ clusterings.size());
}
static CommandOption.String inputFile =
new CommandOption.String(
Clusterings2Info.class,
"input",
"FILENAME",
true,
"text.vectors",
"The filename from which to read the list of instances.",
null);
static CommandOption.Boolean printOption =
new CommandOption.Boolean(Clusterings2Info.class,
"print",
"BOOLEAN",
false,
false,
"If true, print all clusters",
null);
}
| 2,821 | 31.068182 | 113 | java |
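A similar hedged sketch for the Clusterings2Info tool above; the input filename is a placeholder.
// Hypothetical usage sketch: print per-cluster contents and summary statistics
// for a serialized Clusterings file.
Clusterings2Info.main(new String[] { "--input", "text.clusterings", "--print", "true" });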