// twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/WorkerRunnable.java

/* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.topics;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.zip.*;
import java.io.*;
import java.text.NumberFormat;
import cc.mallet.types.*;
import cc.mallet.util.Randoms;
/**
* A parallel topic model runnable task.
*
* @author David Mimno, Andrew McCallum
*/
public class WorkerRunnable implements Runnable {
boolean isFinished = true;
ArrayList<TopicAssignment> data;
int startDoc, numDocs;
protected int numTopics; // Number of topics to be fit
// These values are used to encode type/topic counts as
// count/topic pairs in a single int.
protected int topicMask;
protected int topicBits;
protected int numTypes;
protected double[] alpha; // Dirichlet(alpha,alpha,...) is the distribution over topics
protected double alphaSum;
protected double beta; // Prior on per-topic multinomial distribution over words
protected double betaSum;
public static final double DEFAULT_BETA = 0.01;
protected double smoothingOnlyMass = 0.0;
protected double[] cachedCoefficients;
protected int[][] typeTopicCounts; // indexed by <feature index, topic index>
protected int[] tokensPerTopic; // indexed by <topic index>
// for dirichlet estimation
protected int[] docLengthCounts; // histogram of document sizes
protected int[][] topicDocCounts; // histogram of document/topic counts, indexed by <topic index, sequence position index>
boolean shouldSaveState = false;
boolean shouldBuildLocalCounts = true;
protected Randoms random;
public WorkerRunnable (int numTopics,
double[] alpha, double alphaSum,
double beta, Randoms random,
ArrayList<TopicAssignment> data,
int[][] typeTopicCounts,
int[] tokensPerTopic,
int startDoc, int numDocs) {
this.data = data;
this.numTopics = numTopics;
this.numTypes = typeTopicCounts.length;
if (Integer.bitCount(numTopics) == 1) {
// exact power of 2
topicMask = numTopics - 1;
topicBits = Integer.bitCount(topicMask);
}
else {
// otherwise add an extra bit
topicMask = Integer.highestOneBit(numTopics) * 2 - 1;
topicBits = Integer.bitCount(topicMask);
}
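// Worked example (illustrative only, not part of the original source): with
// numTopics = 100, Integer.highestOneBit(100) == 64, so topicMask == 127 and
// topicBits == 7. A count of 5 for topic 42 is then packed into a single int as
//   int packed = (5 << topicBits) + 42;   // == 682
// and decoded with
//   int topic = packed & topicMask;       // 682 & 127 == 42
//   int count = packed >> topicBits;      // 682 >> 7  == 5
// Because the count occupies the high bits, sorting packed entries in descending
// numeric order also sorts them by count.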
this.typeTopicCounts = typeTopicCounts;
this.tokensPerTopic = tokensPerTopic;
this.alphaSum = alphaSum;
this.alpha = alpha;
this.beta = beta;
this.betaSum = beta * numTypes;
this.random = random;
this.startDoc = startDoc;
this.numDocs = numDocs;
cachedCoefficients = new double[ numTopics ];
//System.err.println("WorkerRunnable Thread: " + numTopics + " topics, " + topicBits + " topic bits, " +
// Integer.toBinaryString(topicMask) + " topic mask");
}
/**
* If there is only one thread, we don't need to go through
* communication overhead. This method asks this worker not
* to prepare local type-topic counts. The method should be
* called when we are using this code in a non-threaded environment.
*/
public void makeOnlyThread() {
shouldBuildLocalCounts = false;
}
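// Typical single-threaded usage (a hypothetical sketch; the variable names and
// argument values are placeholders, not taken from the original source):
//   WorkerRunnable worker =
//       new WorkerRunnable(numTopics, alpha, alphaSum, beta, new Randoms(),
//                          data, typeTopicCounts, tokensPerTopic, 0, data.size());
//   worker.makeOnlyThread();  // no other workers to merge with, so skip rebuilding local counts
//   worker.run();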
public int[] getTokensPerTopic() { return tokensPerTopic; }
public int[][] getTypeTopicCounts() { return typeTopicCounts; }
public int[] getDocLengthCounts() { return docLengthCounts; }
public int[][] getTopicDocCounts() { return topicDocCounts; }
public void initializeAlphaStatistics(int size) {
docLengthCounts = new int[size];
topicDocCounts = new int[numTopics][size];
}
public void collectAlphaStatistics() {
shouldSaveState = true;
}
public void resetBeta(double beta, double betaSum) {
this.beta = beta;
this.betaSum = betaSum;
}
/**
* Once we have sampled the local counts, trash the
* "global" type topic counts and reuse the space to
* build a summary of the type topic counts specific to
* this worker's section of the corpus.
*/
public void buildLocalTypeTopicCounts () {
// Clear the topic totals
Arrays.fill(tokensPerTopic, 0);
// Clear the type/topic counts, only
// looking at the entries before the first 0 entry.
for (int type = 0; type < typeTopicCounts.length; type++) {
int[] topicCounts = typeTopicCounts[type];
int position = 0;
while (position < topicCounts.length &&
topicCounts[position] > 0) {
topicCounts[position] = 0;
position++;
}
}
for (int doc = startDoc;
doc < data.size() && doc < startDoc + numDocs;
doc++) {
TopicAssignment document = data.get(doc);
FeatureSequence tokens = (FeatureSequence) document.instance.getData();
FeatureSequence topicSequence = (FeatureSequence) document.topicSequence;
int[] topics = topicSequence.getFeatures();
for (int position = 0; position < tokens.size(); position++) {
int topic = topics[position];
tokensPerTopic[topic]++;
// The format for these arrays is
// the topic in the rightmost bits
// the count in the remaining (left) bits.
// Since the count is in the high bits, sorting (desc)
// by the numeric value of the int guarantees that
// higher counts will be before the lower counts.
int type = tokens.getIndexAtPosition(position);
int[] currentTypeTopicCounts = typeTopicCounts[ type ];
// Start by assuming that the array is either empty
// or is in sorted (descending) order.
// Here we are only adding counts, so if we find
// an existing location with the topic, we only need
// to ensure that it is not larger than its left neighbor.
int index = 0;
int currentTopic = currentTypeTopicCounts[index] & topicMask;
int currentValue;
while (currentTypeTopicCounts[index] > 0 && currentTopic != topic) {
index++;
if (index == currentTypeTopicCounts.length) {
System.out.println("overflow on type " + type);
}
currentTopic = currentTypeTopicCounts[index] & topicMask;
}
currentValue = currentTypeTopicCounts[index] >> topicBits;
if (currentValue == 0) {
// new value is 1, so we don't have to worry about sorting
// (except by topic suffix, which doesn't matter)
currentTypeTopicCounts[index] =
(1 << topicBits) + topic;
}
else {
currentTypeTopicCounts[index] =
((currentValue + 1) << topicBits) + topic;
// Now ensure that the array is still sorted by
// bubbling this value up.
while (index > 0 &&
currentTypeTopicCounts[index] > currentTypeTopicCounts[index - 1]) {
int temp = currentTypeTopicCounts[index];
currentTypeTopicCounts[index] = currentTypeTopicCounts[index - 1];
currentTypeTopicCounts[index - 1] = temp;
index--;
}
}
}
}
}
public void run () {
try {
if (! isFinished) { System.out.println("already running!"); return; }
isFinished = false;
// Initialize the smoothing-only sampling bucket
smoothingOnlyMass = 0;
// Initialize the cached coefficients, using only smoothing.
// These values will be selectively replaced in documents with
// non-zero counts in particular topics.
for (int topic=0; topic < numTopics; topic++) {
smoothingOnlyMass += alpha[topic] * beta / (tokensPerTopic[topic] + betaSum);
cachedCoefficients[topic] = alpha[topic] / (tokensPerTopic[topic] + betaSum);
}
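// The smoothing-only bucket initialized above, together with the per-document
// topic/beta bucket and the per-token topic-term bucket computed in
// sampleTopicsForOneDoc, follow the SparseLDA-style decomposition of the
// collapsed Gibbs full conditional (sketch, for reference):
//   p(z = t | w, d) is proportional to
//       (alpha_t + n_{t|d}) * (beta + n_{w|t}) / (betaSum + n_t)
//     =   alpha_t * beta                 / (betaSum + n_t)   [smoothingOnlyMass]
//       + n_{t|d} * beta                 / (betaSum + n_t)   [topicBetaMass, per document]
//       + (alpha_t + n_{t|d}) * n_{w|t}  / (betaSum + n_t)   [topicTermMass, per token]
// so cachedCoefficients[t] = (alpha_t + n_{t|d}) / (betaSum + n_t) is exactly the
// factor multiplied by n_{w|t} when scoring the sparse topic-term bucket.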
for (int doc = startDoc;
doc < data.size() && doc < startDoc + numDocs;
doc++) {
/*
if (doc % 10000 == 0) {
System.out.println("processing doc " + doc);
}
*/
FeatureSequence tokenSequence =
(FeatureSequence) data.get(doc).instance.getData();
LabelSequence topicSequence =
(LabelSequence) data.get(doc).topicSequence;
sampleTopicsForOneDoc (tokenSequence, topicSequence,
true);
}
if (shouldBuildLocalCounts) {
buildLocalTypeTopicCounts();
}
shouldSaveState = false;
isFinished = true;
} catch (Exception e) {
e.printStackTrace();
}
}
protected void sampleTopicsForOneDoc (FeatureSequence tokenSequence,
FeatureSequence topicSequence,
boolean readjustTopicsAndStats /* currently ignored */) {
int[] oneDocTopics = topicSequence.getFeatures();
int[] currentTypeTopicCounts;
int type, oldTopic, newTopic;
double topicWeightsSum;
int docLength = tokenSequence.getLength();
int[] localTopicCounts = new int[numTopics];
int[] localTopicIndex = new int[numTopics];
// populate topic counts
for (int position = 0; position < docLength; position++) {
localTopicCounts[oneDocTopics[position]]++;
}
// Build an array that densely lists the topics that
// have non-zero counts.
int denseIndex = 0;
for (int topic = 0; topic < numTopics; topic++) {
if (localTopicCounts[topic] != 0) {
localTopicIndex[denseIndex] = topic;
denseIndex++;
}
}
// Record the total number of non-zero topics
int nonZeroTopics = denseIndex;
// Initialize the topic count/beta sampling bucket
double topicBetaMass = 0.0;
// Initialize cached coefficients and the topic/beta
// normalizing constant.
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
int n = localTopicCounts[topic];
// initialize the normalization constant for the (B * n_{t|d}) term
topicBetaMass += beta * n / (tokensPerTopic[topic] + betaSum);
// update the coefficients for the non-zero topics
cachedCoefficients[topic] = (alpha[topic] + n) / (tokensPerTopic[topic] + betaSum);
}
double topicTermMass = 0.0;
double[] topicTermScores = new double[numTopics];
int[] topicTermIndices;
int[] topicTermValues;
int i;
double score;
// Iterate over the positions (words) in the document
for (int position = 0; position < docLength; position++) {
type = tokenSequence.getIndexAtPosition(position);
oldTopic = oneDocTopics[position];
currentTypeTopicCounts = typeTopicCounts[type];
// Remove this token from all counts.
// Remove this topic's contribution to the
// normalizing constants
smoothingOnlyMass -= alpha[oldTopic] * beta /
(tokensPerTopic[oldTopic] + betaSum);
topicBetaMass -= beta * localTopicCounts[oldTopic] /
(tokensPerTopic[oldTopic] + betaSum);
// Decrement the local doc/topic counts
localTopicCounts[oldTopic]--;
// Maintain the dense index, if we are deleting
// the old topic
if (localTopicCounts[oldTopic] == 0) {
// First get to the dense location associated with
// the old topic.
denseIndex = 0;
// We know it's in there somewhere, so we don't
// need bounds checking.
while (localTopicIndex[denseIndex] != oldTopic) {
denseIndex++;
}
// shift all remaining dense indices to the left.
while (denseIndex < nonZeroTopics) {
if (denseIndex < localTopicIndex.length - 1) {
localTopicIndex[denseIndex] =
localTopicIndex[denseIndex + 1];
}
denseIndex++;
}
nonZeroTopics --;
}
// Decrement the global topic count totals
tokensPerTopic[oldTopic]--;
assert(tokensPerTopic[oldTopic] >= 0) : "old Topic " + oldTopic + " below 0";
// Add the old topic's contribution back into the
// normalizing constants.
smoothingOnlyMass += alpha[oldTopic] * beta /
(tokensPerTopic[oldTopic] + betaSum);
topicBetaMass += beta * localTopicCounts[oldTopic] /
(tokensPerTopic[oldTopic] + betaSum);
// Reset the cached coefficient for this topic
cachedCoefficients[oldTopic] =
(alpha[oldTopic] + localTopicCounts[oldTopic]) /
(tokensPerTopic[oldTopic] + betaSum);
// Now go over the type/topic counts, decrementing
// where appropriate, and calculating the score
// for each topic at the same time.
int index = 0;
int currentTopic, currentValue;
boolean alreadyDecremented = false;
topicTermMass = 0.0;
while (index < currentTypeTopicCounts.length &&
currentTypeTopicCounts[index] > 0) {
currentTopic = currentTypeTopicCounts[index] & topicMask;
currentValue = currentTypeTopicCounts[index] >> topicBits;
if (! alreadyDecremented &&
currentTopic == oldTopic) {
// We're decrementing and adding up the
// sampling weights at the same time, but
// decrementing may require us to reorder
// the topics, so after we're done here,
// look at this cell in the array again.
currentValue --;
if (currentValue == 0) {
currentTypeTopicCounts[index] = 0;
}
else {
currentTypeTopicCounts[index] =
(currentValue << topicBits) + oldTopic;
}
// Shift the reduced value to the right, if necessary.
int subIndex = index;
while (subIndex < currentTypeTopicCounts.length - 1 &&
currentTypeTopicCounts[subIndex] < currentTypeTopicCounts[subIndex + 1]) {
int temp = currentTypeTopicCounts[subIndex];
currentTypeTopicCounts[subIndex] = currentTypeTopicCounts[subIndex + 1];
currentTypeTopicCounts[subIndex + 1] = temp;
subIndex++;
}
alreadyDecremented = true;
}
else {
score =
cachedCoefficients[currentTopic] * currentValue;
topicTermMass += score;
topicTermScores[index] = score;
index++;
}
}
double sample = random.nextUniform() * (smoothingOnlyMass + topicBetaMass + topicTermMass);
double origSample = sample;
// Make sure it actually gets set
newTopic = -1;
if (sample < topicTermMass) {
//topicTermCount++;
i = -1;
while (sample > 0) {
i++;
sample -= topicTermScores[i];
}
newTopic = currentTypeTopicCounts[i] & topicMask;
currentValue = currentTypeTopicCounts[i] >> topicBits;
currentTypeTopicCounts[i] = ((currentValue + 1) << topicBits) + newTopic;
// Bubble the new value up, if necessary
while (i > 0 &&
currentTypeTopicCounts[i] > currentTypeTopicCounts[i - 1]) {
int temp = currentTypeTopicCounts[i];
currentTypeTopicCounts[i] = currentTypeTopicCounts[i - 1];
currentTypeTopicCounts[i - 1] = temp;
i--;
}
}
else {
sample -= topicTermMass;
if (sample < topicBetaMass) {
//betaTopicCount++;
sample /= beta;
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
sample -= localTopicCounts[topic] /
(tokensPerTopic[topic] + betaSum);
if (sample <= 0.0) {
newTopic = topic;
break;
}
}
}
else {
//smoothingOnlyCount++;
sample -= topicBetaMass;
sample /= beta;
newTopic = 0;
sample -= alpha[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
while (sample > 0.0) {
newTopic++;
sample -= alpha[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
}
}
// Move to the position for the new topic,
// which may be the first empty position if this
// is a new topic for this word.
index = 0;
while (currentTypeTopicCounts[index] > 0 &&
(currentTypeTopicCounts[index] & topicMask) != newTopic) {
index++;
if (index == currentTypeTopicCounts.length) {
System.err.println("type: " + type + " new topic: " + newTopic);
for (int k=0; k<currentTypeTopicCounts.length; k++) {
System.err.print((currentTypeTopicCounts[k] & topicMask) + ":" +
(currentTypeTopicCounts[k] >> topicBits) + " ");
}
System.err.println();
}
}
// index should now be set to the position of the new topic,
// which may be an empty cell at the end of the list.
if (currentTypeTopicCounts[index] == 0) {
// inserting a new topic, guaranteed to be in
// order w.r.t. count, if not topic.
currentTypeTopicCounts[index] = (1 << topicBits) + newTopic;
}
else {
currentValue = currentTypeTopicCounts[index] >> topicBits;
currentTypeTopicCounts[index] = ((currentValue + 1) << topicBits) + newTopic;
// Bubble the increased value left, if necessary
while (index > 0 &&
currentTypeTopicCounts[index] > currentTypeTopicCounts[index - 1]) {
int temp = currentTypeTopicCounts[index];
currentTypeTopicCounts[index] = currentTypeTopicCounts[index - 1];
currentTypeTopicCounts[index - 1] = temp;
index--;
}
}
}
if (newTopic == -1) {
System.err.println("WorkerRunnable sampling error: "+ origSample + " " + sample + " " + smoothingOnlyMass + " " +
topicBetaMass + " " + topicTermMass);
newTopic = numTopics-1; // TODO is this appropriate
//throw new IllegalStateException ("WorkerRunnable: New topic not sampled.");
}
//assert(newTopic != -1);
// Put that new topic into the counts
oneDocTopics[position] = newTopic;
smoothingOnlyMass -= alpha[newTopic] * beta /
(tokensPerTopic[newTopic] + betaSum);
topicBetaMass -= beta * localTopicCounts[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
localTopicCounts[newTopic]++;
// If this is a new topic for this document,
// add the topic to the dense index.
if (localTopicCounts[newTopic] == 1) {
// First find the point where we
// should insert the new topic by going to
// the end (which is the only reason we're keeping
// track of the number of non-zero
// topics) and working backwards
denseIndex = nonZeroTopics;
while (denseIndex > 0 &&
localTopicIndex[denseIndex - 1] > newTopic) {
localTopicIndex[denseIndex] =
localTopicIndex[denseIndex - 1];
denseIndex--;
}
localTopicIndex[denseIndex] = newTopic;
nonZeroTopics++;
}
tokensPerTopic[newTopic]++;
// update the coefficients for the non-zero topics
cachedCoefficients[newTopic] =
(alpha[newTopic] + localTopicCounts[newTopic]) /
(tokensPerTopic[newTopic] + betaSum);
smoothingOnlyMass += alpha[newTopic] * beta /
(tokensPerTopic[newTopic] + betaSum);
topicBetaMass += beta * localTopicCounts[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
}
if (shouldSaveState) {
// Update the document-topic count histogram,
// for dirichlet estimation
docLengthCounts[ docLength ]++;
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
topicDocCounts[topic][ localTopicCounts[topic] ]++;
}
}
// Clean up our mess: reset the coefficients to values with only
// smoothing. The next doc will update its own non-zero topics...
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
cachedCoefficients[topic] =
alpha[topic] / (tokensPerTopic[topic] + betaSum);
}
}
}
// twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/PAM4L.java

package cc.mallet.topics;
/* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
import cc.mallet.types.*;
import cc.mallet.util.Randoms;
import java.util.Arrays;
import java.io.*;
import java.text.NumberFormat;
/**
* Four Level Pachinko Allocation with MLE learning,
* based on Andrew's Latent Dirichlet Allocation.
* @author David Mimno
*/
public class PAM4L {
// Parameters
int numSuperTopics; // Number of topics to be fit
int numSubTopics;
double[] alpha; // Dirichlet(alpha,alpha,...) is the distribution over supertopics
double alphaSum;
double[][] subAlphas;
double[] subAlphaSums;
double beta; // Prior on per-topic multinomial distribution over words
double vBeta;
// Data
InstanceList ilist; // the data field of the instances is expected to hold a FeatureSequence
int numTypes;
int numTokens;
// Gibbs sampling state
// (these could be shorts, or we could encode both in one int)
int[][] superTopics; // indexed by <document index, sequence index>
int[][] subTopics; // indexed by <document index, sequence index>
// Per-document state variables
int[][] superSubCounts; // # of words per <super, sub>
int[] superCounts; // # of words per <super>
double[] superWeights; // the component of the Gibbs update that depends on super-topics
double[] subWeights; // the component of the Gibbs update that depends on sub-topics
double[][] superSubWeights; // unnormalized sampling distribution
double[] cumulativeSuperWeights; // a cache of the cumulative weight for each super-topic
// Per-word type state variables
int[][] typeSubTopicCounts; // indexed by <feature index, topic index>
int[] tokensPerSubTopic; // indexed by <topic index>
// [for debugging purposes]
int[] tokensPerSuperTopic; // indexed by <topic index>
int[][] tokensPerSuperSubTopic;
// Histograms for MLE
int[][] superTopicHistograms; // histogram of # of words per supertopic in documents
// eg, [17][4] is # of docs with 4 words in sT 17...
int[][][] subTopicHistograms; // for each supertopic, histogram of # of words per subtopic
Runtime runtime;
NumberFormat formatter;
public PAM4L (int superTopics, int subTopics) {
this (superTopics, subTopics, 50.0, 0.001);
}
public PAM4L (int superTopics, int subTopics,
double alphaSum, double beta) {
formatter = NumberFormat.getInstance();
formatter.setMaximumFractionDigits(5);
this.numSuperTopics = superTopics;
this.numSubTopics = subTopics;
this.alphaSum = alphaSum;
this.alpha = new double[superTopics];
Arrays.fill(alpha, alphaSum / numSuperTopics);
subAlphas = new double[superTopics][subTopics];
subAlphaSums = new double[superTopics];
// Initialize the sub-topic alphas to a symmetric dirichlet.
for (int superTopic = 0; superTopic < superTopics; superTopic++) {
Arrays.fill(subAlphas[superTopic], 1.0);
}
Arrays.fill(subAlphaSums, subTopics);
this.beta = beta; // We can't calculate vBeta until we know how many word types...
runtime = Runtime.getRuntime();
}
public void estimate (InstanceList documents, int numIterations, int optimizeInterval,
int showTopicsInterval,
int outputModelInterval, String outputModelFilename,
Randoms r)
{
ilist = documents;
numTypes = ilist.getDataAlphabet().size ();
int numDocs = ilist.size();
superTopics = new int[numDocs][];
subTopics = new int[numDocs][];
// Allocate several arrays for use within each document
// to cut down memory allocation and garbage collection time
superSubCounts = new int[numSuperTopics][numSubTopics];
superCounts = new int[numSuperTopics];
superWeights = new double[numSuperTopics];
subWeights = new double[numSubTopics];
superSubWeights = new double[numSuperTopics][numSubTopics];
cumulativeSuperWeights = new double[numSuperTopics];
typeSubTopicCounts = new int[numTypes][numSubTopics];
tokensPerSubTopic = new int[numSubTopics];
tokensPerSuperTopic = new int[numSuperTopics];
tokensPerSuperSubTopic = new int[numSuperTopics][numSubTopics];
vBeta = beta * numTypes;
long startTime = System.currentTimeMillis();
int maxTokens = 0;
// Initialize with random assignments of tokens to topics
// and finish allocating this.topics and this.tokens
int superTopic, subTopic, seqLen;
for (int di = 0; di < numDocs; di++) {
FeatureSequence fs = (FeatureSequence) ilist.get(di).getData();
seqLen = fs.getLength();
if (seqLen > maxTokens) {
maxTokens = seqLen;
}
numTokens += seqLen;
superTopics[di] = new int[seqLen];
subTopics[di] = new int[seqLen];
// Randomly assign tokens to topics
for (int si = 0; si < seqLen; si++) {
// Random super-topic
superTopic = r.nextInt(numSuperTopics);
superTopics[di][si] = superTopic;
tokensPerSuperTopic[superTopic]++;
// Random sub-topic
subTopic = r.nextInt(numSubTopics);
subTopics[di][si] = subTopic;
// For the sub-topic, we also need to update the
// word type statistics
typeSubTopicCounts[ fs.getIndexAtPosition(si) ][subTopic]++;
tokensPerSubTopic[subTopic]++;
tokensPerSuperSubTopic[superTopic][subTopic]++;
}
}
System.out.println("max tokens: " + maxTokens);
// These will be initialized at the first call to
// clearHistograms() in the loop below.
superTopicHistograms = new int[numSuperTopics][maxTokens + 1];
subTopicHistograms = new int[numSuperTopics][numSubTopics][maxTokens + 1];
// Finally, start the sampler!
for (int iterations = 0; iterations < numIterations; iterations++) {
long iterationStart = System.currentTimeMillis();
clearHistograms();
sampleTopicsForAllDocs (r);
// There are a few things we do on round-numbered iterations
// that don't make sense if this is the first iteration.
if (iterations > 0) {
if (showTopicsInterval != 0 && iterations % showTopicsInterval == 0) {
System.out.println ();
printTopWords (5, false);
}
if (outputModelInterval != 0 && iterations % outputModelInterval == 0) {
//this.write (new File(outputModelFilename+'.'+iterations));
}
if (optimizeInterval != 0 && iterations % optimizeInterval == 0) {
long optimizeTime = System.currentTimeMillis();
for (superTopic = 0; superTopic < numSuperTopics; superTopic++) {
learnParameters(subAlphas[superTopic],
subTopicHistograms[superTopic],
superTopicHistograms[superTopic]);
subAlphaSums[superTopic] = 0.0;
for (subTopic = 0; subTopic < numSubTopics; subTopic++) {
subAlphaSums[superTopic] += subAlphas[superTopic][subTopic];
}
}
System.out.print("[o:" + (System.currentTimeMillis() - optimizeTime) + "]");
}
}
if (iterations > 1107) {
printWordCounts();
}
if (iterations % 10 == 0)
System.out.println ("<" + iterations + "> ");
System.out.print((System.currentTimeMillis() - iterationStart) + " ");
//else System.out.print (".");
System.out.flush();
}
long seconds = Math.round((System.currentTimeMillis() - startTime)/1000.0);
long minutes = seconds / 60; seconds %= 60;
long hours = minutes / 60; minutes %= 60;
long days = hours / 24; hours %= 24;
System.out.print ("\nTotal time: ");
if (days != 0) { System.out.print(days); System.out.print(" days "); }
if (hours != 0) { System.out.print(hours); System.out.print(" hours "); }
if (minutes != 0) { System.out.print(minutes); System.out.print(" minutes "); }
System.out.print(seconds); System.out.println(" seconds");
// 124.5 seconds
// 144.8 seconds after using FeatureSequence instead of tokens[][] array
// 121.6 seconds after putting "final" on FeatureSequence.getIndexAtPosition()
// 106.3 seconds after avoiding array lookup in inner loop with a temporary variable
}
private void clearHistograms() {
for (int superTopic = 0; superTopic < numSuperTopics; superTopic++) {
Arrays.fill(superTopicHistograms[superTopic], 0);
for (int subTopic = 0; subTopic < numSubTopics; subTopic++) {
Arrays.fill(subTopicHistograms[superTopic][subTopic], 0);
}
}
}
/** Use the fixed point iteration described by Tom Minka. */
public void learnParameters(double[] parameters, int[][] observations, int[] observationLengths) {
int i, k;
double parametersSum = 0;
// Initialize the parameter sum
for (k=0; k < parameters.length; k++) {
parametersSum += parameters[k];
}
double oldParametersK;
double currentDigamma;
double denominator;
int[] histogram;
int nonZeroLimit;
int[] nonZeroLimits = new int[observations.length];
Arrays.fill(nonZeroLimits, -1);
// The histogram arrays go up to the size of the largest document,
// but the non-zero values will almost always cluster in the low end.
// We avoid looping over empty arrays by saving the index of the largest
// non-zero value.
for (i=0; i<observations.length; i++) {
histogram = observations[i];
for (k = 0; k < histogram.length; k++) {
if (histogram[k] > 0) {
nonZeroLimits[i] = k;
}
}
}
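// The iterations below implement Minka's fixed-point update for Dirichlet
// parameters (sketch, for reference; here alpha_k corresponds to parameters[k]
// and alphaSum to parametersSum):
//   alpha_k <- alpha_k * [ sum_d ( digamma(n_{d,k} + alpha_k) - digamma(alpha_k) ) ]
//                      / [ sum_d ( digamma(n_d + alphaSum)    - digamma(alphaSum) ) ]
// Rather than calling a digamma function directly, the code uses the identity
//   digamma(x + n) - digamma(x) = sum over i = 0 .. n-1 of 1 / (x + i)
// and accumulates those 1/(x + i) terms over the count histograms, weighting
// each term by how many documents fall into that histogram bin.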
for (int iteration=0; iteration<200; iteration++) {
// Calculate the denominator
denominator = 0;
currentDigamma = 0;
// Iterate over the histogram:
for (i=1; i<observationLengths.length; i++) {
currentDigamma += 1 / (parametersSum + i - 1);
denominator += observationLengths[i] * currentDigamma;
}
/*
if (Double.isNaN(denominator)) {
System.out.println(parameterSum);
for (i=1; i < observationLengths.length; i++) {
System.out.print(observationLengths[i] + " ");
}
System.out.println();
}
*/
// Calculate the individual parameters
parametersSum = 0;
for (k=0; k<parameters.length; k++) {
// What's the largest non-zero element in the histogram?
nonZeroLimit = nonZeroLimits[k];
// If there are no tokens assigned to this super-sub pair
// anywhere in the corpus, bail.
if (nonZeroLimit == -1) {
parameters[k] = 0.000001;
parametersSum += 0.000001;
continue;
}
oldParametersK = parameters[k];
parameters[k] = 0;
currentDigamma = 0;
histogram = observations[k];
for (i=1; i <= nonZeroLimit; i++) {
currentDigamma += 1 / (oldParametersK + i - 1);
parameters[k] += histogram[i] * currentDigamma;
}
parameters[k] *= oldParametersK / denominator;
if (Double.isNaN(parameters[k])) {
System.out.println("parametersK *= " +
oldParametersK + " / " +
denominator);
for (i=1; i < histogram.length; i++) {
System.out.print(histogram[i] + " ");
}
System.out.println();
}
parametersSum += parameters[k];
}
}
}
/* One iteration of Gibbs sampling, across all documents. */
private void sampleTopicsForAllDocs (Randoms r)
{
// Loop over every word in the corpus
for (int di = 0; di < superTopics.length; di++) {
sampleTopicsForOneDoc ((FeatureSequence)ilist.get(di).getData(),
superTopics[di], subTopics[di], r);
}
}
private void sampleTopicsForOneDoc (FeatureSequence oneDocTokens,
int[] superTopics, // indexed by seq position
int[] subTopics,
Randoms r) {
// long startTime = System.currentTimeMillis();
int[] currentTypeSubTopicCounts;
int[] currentSuperSubCounts;
double[] currentSuperSubWeights;
double[] currentSubAlpha;
int type, subTopic, superTopic;
double currentSuperWeight, cumulativeWeight, sample;
int docLen = oneDocTokens.getLength();
for (int t = 0; t < numSuperTopics; t++) {
Arrays.fill(superSubCounts[t], 0);
}
Arrays.fill(superCounts, 0);
// populate topic counts
for (int si = 0; si < docLen; si++) {
superSubCounts[ superTopics[si] ][ subTopics[si] ]++;
superCounts[ superTopics[si] ]++;
}
// Iterate over the positions (words) in the document
for (int si = 0; si < docLen; si++) {
type = oneDocTokens.getIndexAtPosition(si);
superTopic = superTopics[si];
subTopic = subTopics[si];
// Remove this token from all counts
superSubCounts[superTopic][subTopic]--;
superCounts[superTopic]--;
typeSubTopicCounts[type][subTopic]--;
tokensPerSuperTopic[superTopic]--;
tokensPerSubTopic[subTopic]--;
tokensPerSuperSubTopic[superTopic][subTopic]--;
// Build a distribution over super-sub topic pairs
// for this token
// Clear the data structures
for (int t = 0; t < numSuperTopics; t++) {
Arrays.fill(superSubWeights[t], 0.0);
}
Arrays.fill(superWeights, 0.0);
Arrays.fill(subWeights, 0.0);
Arrays.fill(cumulativeSuperWeights, 0.0);
// Avoid two layer (ie [][]) array accesses
currentTypeSubTopicCounts = typeSubTopicCounts[type];
// The conditional probability of each super-sub pair is proportional
// to an expression with three parts, one that depends only on the
// super-topic, one that depends only on the sub-topic and the word type,
// and one that depends on the super-sub pair.
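// Written out explicitly (a sketch assembled from the loops below):
//   p(super = s, sub = t | w, doc) is proportional to
//       (superCounts[s] + alpha[s]) / (superCounts[s] + subAlphaSums[s])     [super-only]
//     * (typeSubTopicCounts[w][t] + beta) / (tokensPerSubTopic[t] + vBeta)   [sub-only]
//     * (superSubCounts[s][t] + subAlphas[s][t])                             [super-sub pair]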
// Calculate each of the super-only factors first
for (superTopic = 0; superTopic < numSuperTopics; superTopic++) {
superWeights[superTopic] = ((double) superCounts[superTopic] + alpha[superTopic]) /
((double) superCounts[superTopic] + subAlphaSums[superTopic]);
}
// Next calculate the sub-only factors
for (subTopic = 0; subTopic < numSubTopics; subTopic++) {
subWeights[subTopic] = ((double) currentTypeSubTopicCounts[subTopic] + beta) /
((double) tokensPerSubTopic[subTopic] + vBeta);
}
// Finally, put them together
cumulativeWeight = 0.0;
for (superTopic = 0; superTopic < numSuperTopics; superTopic++) {
currentSuperSubWeights = superSubWeights[superTopic];
currentSuperSubCounts = superSubCounts[superTopic];
currentSubAlpha = subAlphas[superTopic];
currentSuperWeight = superWeights[superTopic];
for (subTopic = 0; subTopic < numSubTopics; subTopic++) {
currentSuperSubWeights[subTopic] =
currentSuperWeight *
subWeights[subTopic] *
((double) currentSuperSubCounts[subTopic] + currentSubAlpha[subTopic]);
cumulativeWeight += currentSuperSubWeights[subTopic];
}
cumulativeSuperWeights[superTopic] = cumulativeWeight;
}
// Sample a topic assignment from this distribution
sample = r.nextUniform() * cumulativeWeight;
// Go over the row sums to find the super-topic...
superTopic = 0;
while (sample > cumulativeSuperWeights[superTopic]) {
superTopic++;
}
// Now read across to find the sub-topic
currentSuperSubWeights = superSubWeights[superTopic];
cumulativeWeight = cumulativeSuperWeights[superTopic] -
currentSuperSubWeights[0];
// Go over each sub-topic until the weight is LESS than
// the sample. Note that we're subtracting weights
// in the same order we added them...
subTopic = 0;
while (sample < cumulativeWeight) {
subTopic++;
cumulativeWeight -= currentSuperSubWeights[subTopic];
}
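// Worked example (hypothetical numbers, not from the original source): with two
// super-topics and two sub-topics and unnormalized pair weights
//   superSubWeights = { {0.1, 0.2}, {0.3, 0.4} }
// the running total gives cumulativeSuperWeights = {0.3, 1.0}. A draw of
// sample = 0.65 skips super-topic 0 (0.65 > 0.3) and selects super-topic 1; the
// backwards subtraction then starts from 1.0 - 0.3 = 0.7, subtracts 0.4 to reach
// 0.3, and stops at sub-topic 1, i.e. the pair whose segment [0.6, 1.0) contains 0.65.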
// Save the choice into the Gibbs state
superTopics[si] = superTopic;
subTopics[si] = subTopic;
// Put the new super/sub topics into the counts
superSubCounts[superTopic][subTopic]++;
superCounts[superTopic]++;
typeSubTopicCounts[type][subTopic]++;
tokensPerSuperTopic[superTopic]++;
tokensPerSubTopic[subTopic]++;
tokensPerSuperSubTopic[superTopic][subTopic]++;
}
// Update the topic count histograms
// for dirichlet estimation
for (superTopic = 0; superTopic < numSuperTopics; superTopic++) {
superTopicHistograms[superTopic][ superCounts[superTopic] ]++;
currentSuperSubCounts = superSubCounts[superTopic];
for (subTopic = 0; subTopic < numSubTopics; subTopic++) {
subTopicHistograms[superTopic][subTopic][ currentSuperSubCounts[subTopic] ]++;
}
}
}
public void printWordCounts () {
int subTopic, superTopic;
StringBuffer output = new StringBuffer();
for (superTopic = 0; superTopic < numSuperTopics; superTopic++) {
for (subTopic = 0; subTopic < numSubTopics; subTopic++) {
output.append (tokensPerSuperSubTopic[superTopic][subTopic] + " (" +
formatter.format(subAlphas[superTopic][subTopic]) + ")\t");
}
output.append("\n");
}
System.out.println(output);
}
public void printTopWords (int numWords, boolean useNewLines) {
IDSorter[] wp = new IDSorter[numTypes];
IDSorter[] sortedSubTopics = new IDSorter[numSubTopics];
String[] subTopicTerms = new String[numSubTopics];
int subTopic, superTopic;
for (subTopic = 0; subTopic < numSubTopics; subTopic++) {
for (int wi = 0; wi < numTypes; wi++)
wp[wi] = new IDSorter (wi, (((double) typeSubTopicCounts[wi][subTopic]) /
tokensPerSubTopic[subTopic]));
Arrays.sort (wp);
StringBuffer topicTerms = new StringBuffer();
for (int i = 0; i < numWords; i++) {
topicTerms.append(ilist.getDataAlphabet().lookupObject(wp[i].wi));
topicTerms.append(" ");
}
subTopicTerms[subTopic] = topicTerms.toString();
if (useNewLines) {
System.out.println ("\nTopic " + subTopic);
for (int i = 0; i < numWords; i++)
System.out.println (ilist.getDataAlphabet().lookupObject(wp[i].wi).toString() +
"\t" + formatter.format(wp[i].p));
} else {
System.out.println ("Topic "+ subTopic +":\t[" + tokensPerSubTopic[subTopic] + "]\t" +
subTopicTerms[subTopic]);
}
}
int maxSubTopics = 10;
if (numSubTopics < 10) { maxSubTopics = numSubTopics; }
for (superTopic = 0; superTopic < numSuperTopics; superTopic++) {
for (subTopic = 0; subTopic < numSubTopics; subTopic++) {
sortedSubTopics[subTopic] = new IDSorter(subTopic, subAlphas[superTopic][subTopic]);
}
Arrays.sort(sortedSubTopics);
System.out.println("\nSuper-topic " + superTopic +
"[" + tokensPerSuperTopic[superTopic] + "]\t");
for (int i = 0; i < maxSubTopics; i++) {
subTopic = sortedSubTopics[i].wi;
System.out.println(subTopic + ":\t" +
formatter.format(subAlphas[superTopic][subTopic]) + "\t" +
subTopicTerms[subTopic]);
}
}
}
public void printDocumentTopics (File f) throws IOException {
printDocumentTopics (new PrintWriter (new BufferedWriter( new FileWriter (f))), 0.0, -1);
}
// This looks broken. -DM
public void printDocumentTopics (PrintWriter pw, double threshold, int max) {
pw.println ("#doc source subtopic-proportions , supertopic-proportions");
int docLen;
double superTopicDist[] = new double [numSuperTopics];
double subTopicDist[] = new double [numSubTopics];
for (int di = 0; di < superTopics.length; di++) {
pw.print (di); pw.print (' ');
docLen = superTopics[di].length;
if (ilist.get(di).getSource() != null){
pw.print (ilist.get(di).getSource().toString());
}
else {
pw.print("null-source");
}
pw.print (' ');
docLen = subTopics[di].length;
// populate per-document topic counts
for (int si = 0; si < docLen; si++) {
superTopicDist[superTopics[di][si]] += 1.0;
subTopicDist[subTopics[di][si]] += 1.0;
}
for (int ti = 0; ti < numSuperTopics; ti++)
superTopicDist[ti] /= docLen;
for (int ti = 0; ti < numSubTopics; ti++)
subTopicDist[ti] /= docLen;
// print the subtopic proportions, sorted
if (max < 0) max = numSubTopics;
for (int tp = 0; tp < max; tp++) {
double maxvalue = 0;
int maxindex = -1;
for (int ti = 0; ti < numSubTopics; ti++)
if (subTopicDist[ti] > maxvalue) {
maxvalue = subTopicDist[ti];
maxindex = ti;
}
if (maxindex == -1 || subTopicDist[maxindex] < threshold)
break;
pw.print (maxindex+" "+subTopicDist[maxindex]+" ");
subTopicDist[maxindex] = 0;
}
pw.print (" , ");
// print the supertopic proportions, sorted
if (max < 0) max = numSuperTopics;
for (int tp = 0; tp < max; tp++) {
double maxvalue = 0;
int maxindex = -1;
for (int ti = 0; ti < numSuperTopics; ti++)
if (superTopicDist[ti] > maxvalue) {
maxvalue = superTopicDist[ti];
maxindex = ti;
}
if (maxindex == -1 || superTopicDist[maxindex] < threshold)
break;
pw.print (maxindex+" "+superTopicDist[maxindex]+" ");
superTopicDist[maxindex] = 0;
}
pw.println ();
}
}
public void printState (File f) throws IOException
{
printState (new PrintWriter (new BufferedWriter (new FileWriter(f))));
}
public void printState (PrintWriter pw)
{
Alphabet a = ilist.getDataAlphabet();
pw.println ("#doc pos typeindex type super-topic sub-topic");
for (int di = 0; di < superTopics.length; di++) {
FeatureSequence fs = (FeatureSequence) ilist.get(di).getData();
for (int si = 0; si < superTopics[di].length; si++) {
int type = fs.getIndexAtPosition(si);
pw.print(di); pw.print(' ');
pw.print(si); pw.print(' ');
pw.print(type); pw.print(' ');
pw.print(a.lookupObject(type)); pw.print(' ');
pw.print(superTopics[di][si]); pw.print(' ');
pw.print(subTopics[di][si]); pw.println();
}
}
pw.close();
}
/*
public void write (File f) {
try {
ObjectOutputStream oos = new ObjectOutputStream (new FileOutputStream(f));
oos.writeObject(this);
oos.close();
}
catch (IOException e) {
System.err.println("Exception writing file " + f + ": " + e);
}
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (ilist);
out.writeInt (numTopics);
out.writeObject (alpha);
out.writeDouble (beta);
out.writeDouble (vBeta);
for (int di = 0; di < topics.length; di ++)
for (int si = 0; si < topics[di].length; si++)
out.writeInt (topics[di][si]);
for (int fi = 0; fi < numTypes; fi++)
for (int ti = 0; ti < numTopics; ti++)
out.writeInt (typeTopicCounts[fi][ti]);
for (int ti = 0; ti < numTopics; ti++)
out.writeInt (tokensPerTopic[ti]);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int featuresLength;
int version = in.readInt ();
ilist = (InstanceList) in.readObject ();
numTopics = in.readInt();
alpha = (double[]) in.readObject();
beta = in.readDouble();
vBeta = in.readDouble();
int numDocs = ilist.size();
topics = new int[numDocs][];
for (int di = 0; di < ilist.size(); di++) {
int docLen = ((FeatureSequence)ilist.getInstance(di).getData()).getLength();
topics[di] = new int[docLen];
for (int si = 0; si < docLen; si++)
topics[di][si] = in.readInt();
}
int numTypes = ilist.getDataAlphabet().size();
typeTopicCounts = new int[numTypes][numTopics];
for (int fi = 0; fi < numTypes; fi++)
for (int ti = 0; ti < numTopics; ti++)
typeTopicCounts[fi][ti] = in.readInt();
tokensPerTopic = new int[numTopics];
for (int ti = 0; ti < numTopics; ti++)
tokensPerTopic[ti] = in.readInt();
}
*/
// Recommended to use mallet/bin/vectors2topics instead.
public static void main (String[] args) throws IOException
{
InstanceList ilist = InstanceList.load (new File(args[0]));
int numIterations = args.length > 1 ? Integer.parseInt(args[1]) : 1000;
int numTopWords = args.length > 2 ? Integer.parseInt(args[2]) : 20;
int numSuperTopics = args.length > 3 ? Integer.parseInt(args[3]) : 10;
int numSubTopics = args.length > 4 ? Integer.parseInt(args[4]) : 10;
System.out.println ("Data loaded.");
PAM4L pam = new PAM4L (numSuperTopics, numSubTopics);
pam.estimate (ilist, numIterations, 50, 0, 50, null, new Randoms()); // should be 1100
pam.printTopWords (numTopWords, true);
// pam.printDocumentTopics (new File(args[0]+".pam"));
}
class IDSorter implements Comparable {
int wi; double p;
public IDSorter (int wi, double p) { this.wi = wi; this.p = p; }
public final int compareTo (Object o2) {
if (p > ((IDSorter) o2).p)
return -1;
else if (p == ((IDSorter) o2).p)
return 0;
else return 1;
}
}
}
// twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/LDAHyper.java

/* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.topics;
import gnu.trove.TIntIntHashMap;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
import java.util.TreeSet;
import java.util.Iterator;
import java.util.zip.*;
import java.io.*;
import java.text.NumberFormat;
import cc.mallet.types.*;
import cc.mallet.util.Randoms;
/**
* Latent Dirichlet Allocation with optimized hyperparameters
*
* @author David Mimno, Andrew McCallum
* @deprecated Use ParallelTopicModel instead, which uses substantially faster data structures even for non-parallel operation.
*/
public class LDAHyper implements Serializable {
// Analogous to a cc.mallet.classify.Classification
public class Topication implements Serializable {
public Instance instance;
public LDAHyper model;
public LabelSequence topicSequence;
public Labeling topicDistribution; // not actually constructed by model fitting, but could be added for "test" documents.
public Topication (Instance instance, LDAHyper model, LabelSequence topicSequence) {
this.instance = instance;
this.model = model;
this.topicSequence = topicSequence;
}
// Maintainable serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (instance);
out.writeObject (model);
out.writeObject (topicSequence);
out.writeObject (topicDistribution);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
instance = (Instance) in.readObject();
model = (LDAHyper) in.readObject();
topicSequence = (LabelSequence) in.readObject();
topicDistribution = (Labeling) in.readObject();
}
}
protected ArrayList<Topication> data; // the training instances and their topic assignments
protected Alphabet alphabet; // the alphabet for the input data
protected LabelAlphabet topicAlphabet; // the alphabet for the topics
protected int numTopics; // Number of topics to be fit
protected int numTypes;
protected double[] alpha; // Dirichlet(alpha,alpha,...) is the distribution over topics
protected double alphaSum;
protected double beta; // Prior on per-topic multinomial distribution over words
protected double betaSum;
public static final double DEFAULT_BETA = 0.01;
protected double smoothingOnlyMass = 0.0;
protected double[] cachedCoefficients;
int topicTermCount = 0;
int betaTopicCount = 0;
int smoothingOnlyCount = 0;
// Instance list for empirical likelihood calculation
protected InstanceList testing = null;
// An array to put the topic counts for the current document.
// Initialized locally below. Defined here to avoid
// garbage collection overhead.
protected int[] oneDocTopicCounts; // indexed by <document index, topic index>
protected gnu.trove.TIntIntHashMap[] typeTopicCounts; // indexed by <feature index, topic index>
protected int[] tokensPerTopic; // indexed by <topic index>
// for dirichlet estimation
protected int[] docLengthCounts; // histogram of document sizes
protected int[][] topicDocCounts; // histogram of document/topic counts, indexed by <topic index, sequence position index>
public int iterationsSoFar = 0;
public int numIterations = 1000;
public int burninPeriod = 20; // was 50; //was 200;
public int saveSampleInterval = 5; // was 10;
public int optimizeInterval = 20; // was 50;
public int showTopicsInterval = 10; // was 50;
public int wordsPerTopic = 7;
protected int outputModelInterval = 0;
protected String outputModelFilename;
protected int saveStateInterval = 0;
protected String stateFilename = null;
protected Randoms random;
protected NumberFormat formatter;
protected boolean printLogLikelihood = false;
public LDAHyper (int numberOfTopics) {
this (numberOfTopics, numberOfTopics, DEFAULT_BETA);
}
public LDAHyper (int numberOfTopics, double alphaSum, double beta) {
this (numberOfTopics, alphaSum, beta, new Randoms());
}
private static LabelAlphabet newLabelAlphabet (int numTopics) {
LabelAlphabet ret = new LabelAlphabet();
for (int i = 0; i < numTopics; i++)
ret.lookupIndex("topic"+i);
return ret;
}
public LDAHyper (int numberOfTopics, double alphaSum, double beta, Randoms random) {
this (newLabelAlphabet (numberOfTopics), alphaSum, beta, random);
}
public LDAHyper (LabelAlphabet topicAlphabet, double alphaSum, double beta, Randoms random)
{
this.data = new ArrayList<Topication>();
this.topicAlphabet = topicAlphabet;
this.numTopics = topicAlphabet.size();
this.alphaSum = alphaSum;
this.alpha = new double[numTopics];
Arrays.fill(alpha, alphaSum / numTopics);
this.beta = beta;
this.random = random;
oneDocTopicCounts = new int[numTopics];
tokensPerTopic = new int[numTopics];
formatter = NumberFormat.getInstance();
formatter.setMaximumFractionDigits(5);
System.err.println("LDA: " + numTopics + " topics");
}
public Alphabet getAlphabet() { return alphabet; }
public LabelAlphabet getTopicAlphabet() { return topicAlphabet; }
public int getNumTopics() { return numTopics; }
public ArrayList<Topication> getData() { return data; }
public int getCountFeatureTopic (int featureIndex, int topicIndex) { return typeTopicCounts[featureIndex].get(topicIndex); }
public int getCountTokensPerTopic (int topicIndex) { return tokensPerTopic[topicIndex]; }
/** Held-out instances for empirical likelihood calculation */
public void setTestingInstances(InstanceList testing) {
this.testing = testing;
}
public void setNumIterations (int numIterations) {
this.numIterations = numIterations;
}
public void setBurninPeriod (int burninPeriod) {
this.burninPeriod = burninPeriod;
}
public void setTopicDisplay(int interval, int n) {
this.showTopicsInterval = interval;
this.wordsPerTopic = n;
}
public void setRandomSeed(int seed) {
random = new Randoms(seed);
}
public void setOptimizeInterval(int interval) {
this.optimizeInterval = interval;
}
public void setModelOutput(int interval, String filename) {
this.outputModelInterval = interval;
this.outputModelFilename = filename;
}
/** Define how often and where to save the state
*
* @param interval Save a copy of the state every <code>interval</code> iterations.
* @param filename Save the state to this file, with the iteration number as a suffix
*/
public void setSaveState(int interval, String filename) {
this.saveStateInterval = interval;
this.stateFilename = filename;
}
protected int instanceLength (Instance instance) {
return ((FeatureSequence)instance.getData()).size();
}
// Can be safely called multiple times. This method will complain if it can't handle the situation
private void initializeForTypes (Alphabet alphabet) {
if (this.alphabet == null) {
this.alphabet = alphabet;
this.numTypes = alphabet.size();
this.typeTopicCounts = new TIntIntHashMap[numTypes];
for (int fi = 0; fi < numTypes; fi++)
typeTopicCounts[fi] = new TIntIntHashMap();
this.betaSum = beta * numTypes;
} else if (alphabet != this.alphabet) {
throw new IllegalArgumentException ("Cannot change Alphabet.");
} else if (alphabet.size() != this.numTypes) {
this.numTypes = alphabet.size();
TIntIntHashMap[] newTypeTopicCounts = new TIntIntHashMap[numTypes];
for (int i = 0; i < typeTopicCounts.length; i++)
newTypeTopicCounts[i] = typeTopicCounts[i];
for (int i = typeTopicCounts.length; i < numTypes; i++)
newTypeTopicCounts[i] = new TIntIntHashMap();
// TODO AKM July 18: Why wasn't the next line there previously?
// this.typeTopicCounts = newTypeTopicCounts;
this.betaSum = beta * numTypes;
} // else, nothing changed, nothing to be done
}
private void initializeTypeTopicCounts () {
TIntIntHashMap[] newTypeTopicCounts = new TIntIntHashMap[numTypes];
for (int i = 0; i < typeTopicCounts.length; i++)
newTypeTopicCounts[i] = typeTopicCounts[i];
for (int i = typeTopicCounts.length; i < numTypes; i++)
newTypeTopicCounts[i] = new TIntIntHashMap();
this.typeTopicCounts = newTypeTopicCounts;
}
public void addInstances (InstanceList training) {
initializeForTypes (training.getDataAlphabet());
ArrayList<LabelSequence> topicSequences = new ArrayList<LabelSequence>();
for (Instance instance : training) {
LabelSequence topicSequence = new LabelSequence(topicAlphabet, new int[instanceLength(instance)]);
if (false)
// This method not yet obeying its last "false" argument, and must be for this to work
sampleTopicsForOneDoc((FeatureSequence)instance.getData(), topicSequence, false, false);
else {
Randoms r = new Randoms();
int[] topics = topicSequence.getFeatures();
for (int i = 0; i < topics.length; i++)
topics[i] = r.nextInt(numTopics);
}
topicSequences.add (topicSequence);
}
addInstances (training, topicSequences);
}
public void addInstances (InstanceList training, List<LabelSequence> topics) {
initializeForTypes (training.getDataAlphabet());
assert (training.size() == topics.size());
for (int i = 0; i < training.size(); i++) {
Topication t = new Topication (training.get(i), this, topics.get(i));
data.add (t);
// Include sufficient statistics for this one doc
FeatureSequence tokenSequence = (FeatureSequence) t.instance.getData();
LabelSequence topicSequence = t.topicSequence;
for (int pi = 0; pi < topicSequence.getLength(); pi++) {
int topic = topicSequence.getIndexAtPosition(pi);
typeTopicCounts[tokenSequence.getIndexAtPosition(pi)].adjustOrPutValue(topic, 1, 1);
tokensPerTopic[topic]++;
}
}
initializeHistogramsAndCachedValues();
}
/**
* Gather statistics on the size of documents
* and create histograms for use in Dirichlet hyperparameter
* optimization.
*/
protected void initializeHistogramsAndCachedValues() {
int maxTokens = 0;
int totalTokens = 0;
int seqLen;
for (int doc = 0; doc < data.size(); doc++) {
FeatureSequence fs = (FeatureSequence) data.get(doc).instance.getData();
seqLen = fs.getLength();
if (seqLen > maxTokens)
maxTokens = seqLen;
totalTokens += seqLen;
}
// Initialize the smoothing-only sampling bucket
smoothingOnlyMass = 0;
for (int topic = 0; topic < numTopics; topic++)
smoothingOnlyMass += alpha[topic] * beta / (tokensPerTopic[topic] + betaSum);
// Initialize the cached coefficients, using only smoothing.
cachedCoefficients = new double[ numTopics ];
for (int topic=0; topic < numTopics; topic++)
cachedCoefficients[topic] = alpha[topic] / (tokensPerTopic[topic] + betaSum);
System.err.println("max tokens: " + maxTokens);
System.err.println("total tokens: " + totalTokens);
docLengthCounts = new int[maxTokens + 1];
topicDocCounts = new int[numTopics][maxTokens + 1];
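// Descriptive note: docLengthCounts[n] will count documents containing exactly
// n tokens, and topicDocCounts[t][n] will count documents in which topic t is
// assigned to exactly n tokens. Both histograms are filled during sampling on
// iterations where sampled state is saved, and passed to
// Dirichlet.learnParameters to re-estimate the alpha hyperparameters.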
}
public void estimate () throws IOException {
estimate (numIterations);
}
public void estimate (int iterationsThisRound) throws IOException {
long startTime = System.currentTimeMillis();
int maxIteration = iterationsSoFar + iterationsThisRound;
for ( ; iterationsSoFar <= maxIteration; iterationsSoFar++) {
long iterationStart = System.currentTimeMillis();
if (showTopicsInterval != 0 && iterationsSoFar != 0 && iterationsSoFar % showTopicsInterval == 0) {
System.out.println();
printTopWords (System.out, wordsPerTopic, false);
if (testing != null) {
double el = empiricalLikelihood(1000, testing);
double ll = modelLogLikelihood();
double mi = topicLabelMutualInformation();
System.out.println(ll + "\t" + el + "\t" + mi);
}
}
if (saveStateInterval != 0 && iterationsSoFar % saveStateInterval == 0) {
this.printState(new File(stateFilename + '.' + iterationsSoFar));
}
/*
if (outputModelInterval != 0 && iterations % outputModelInterval == 0) {
this.write (new File(outputModelFilename+'.'+iterations));
}
*/
// TODO this condition should also check that we have more than one sample to work with here
// (The number of samples actually obtained is not yet tracked.)
if (iterationsSoFar > burninPeriod && optimizeInterval != 0 &&
iterationsSoFar % optimizeInterval == 0) {
alphaSum = Dirichlet.learnParameters(alpha, topicDocCounts, docLengthCounts);
smoothingOnlyMass = 0.0;
for (int topic = 0; topic < numTopics; topic++) {
smoothingOnlyMass += alpha[topic] * beta / (tokensPerTopic[topic] + betaSum);
cachedCoefficients[topic] = alpha[topic] / (tokensPerTopic[topic] + betaSum);
}
clearHistograms();
}
// Loop over every document in the corpus
topicTermCount = betaTopicCount = smoothingOnlyCount = 0;
int numDocs = data.size(); // TODO consider beginning by sub-sampling?
for (int di = 0; di < numDocs; di++) {
FeatureSequence tokenSequence = (FeatureSequence) data.get(di).instance.getData();
LabelSequence topicSequence = (LabelSequence) data.get(di).topicSequence;
sampleTopicsForOneDoc (tokenSequence, topicSequence,
iterationsSoFar >= burninPeriod && iterationsSoFar % saveSampleInterval == 0,
true);
}
long elapsedMillis = System.currentTimeMillis() - iterationStart;
if (elapsedMillis < 1000) {
System.out.print(elapsedMillis + "ms ");
}
else {
System.out.print((elapsedMillis/1000) + "s ");
}
//System.out.println(topicTermCount + "\t" + betaTopicCount + "\t" + smoothingOnlyCount);
if (iterationsSoFar % 10 == 0) {
System.out.println ("<" + iterationsSoFar + "> ");
if (printLogLikelihood) System.out.println (modelLogLikelihood());
}
System.out.flush();
}
long seconds = Math.round((System.currentTimeMillis() - startTime)/1000.0);
long minutes = seconds / 60; seconds %= 60;
long hours = minutes / 60; minutes %= 60;
long days = hours / 24; hours %= 24;
System.out.print ("\nTotal time: ");
if (days != 0) { System.out.print(days); System.out.print(" days "); }
if (hours != 0) { System.out.print(hours); System.out.print(" hours "); }
if (minutes != 0) { System.out.print(minutes); System.out.print(" minutes "); }
System.out.print(seconds); System.out.println(" seconds");
}
private void clearHistograms() {
Arrays.fill(docLengthCounts, 0);
for (int topic = 0; topic < topicDocCounts.length; topic++)
Arrays.fill(topicDocCounts[topic], 0);
}
/** If topicSequence assignments are already set and accounted for in sufficient statistics,
* then readjustTopicsAndStats should be true. The topics will be re-sampled and the sufficient statistics updated.
* If operating on a new or a test document, and featureSequence & topicSequence are not already accounted for in the sufficient statistics,
* then readjustTopicsAndStats should be false. The current topic assignments will be ignored, and the sufficient statistics
* will not be changed.
* If you want to estimate the Dirichlet alpha based on the per-document topic multinomials sampled this round,
* then saveStateForAlphaEstimation should be true. */
private void oldSampleTopicsForOneDoc (FeatureSequence featureSequence,
FeatureSequence topicSequence,
boolean saveStateForAlphaEstimation, boolean readjustTopicsAndStats)
{
long startTime = System.currentTimeMillis();
int[] oneDocTopics = topicSequence.getFeatures();
TIntIntHashMap currentTypeTopicCounts;
int type, oldTopic, newTopic;
double[] topicDistribution;
double topicDistributionSum;
int docLen = featureSequence.getLength();
int adjustedValue;
int[] topicIndices, topicCounts;
double weight;
// populate topic counts
Arrays.fill(oneDocTopicCounts, 0);
if (readjustTopicsAndStats) {
for (int token = 0; token < docLen; token++) {
oneDocTopicCounts[ oneDocTopics[token] ]++;
}
}
// Iterate over the tokens (words) in the document
for (int token = 0; token < docLen; token++) {
type = featureSequence.getIndexAtPosition(token);
oldTopic = oneDocTopics[token];
currentTypeTopicCounts = typeTopicCounts[type];
assert (currentTypeTopicCounts.size() != 0);
if (readjustTopicsAndStats) {
// Remove this token from all counts
oneDocTopicCounts[oldTopic]--;
adjustedValue = currentTypeTopicCounts.adjustOrPutValue(oldTopic, -1, -1);
if (adjustedValue == 0) currentTypeTopicCounts.remove(oldTopic);
else if (adjustedValue == -1) throw new IllegalStateException ("Token count in topic went negative.");
tokensPerTopic[oldTopic]--;
}
// Build a distribution over topics for this token
topicIndices = currentTypeTopicCounts.keys();
topicCounts = currentTypeTopicCounts.getValues();
topicDistribution = new double[topicIndices.length];
// TODO Yipes, memory allocation in the inner loop! But note that .keys and .getValues is doing this too.
topicDistributionSum = 0;
for (int i = 0; i < topicCounts.length; i++) {
int topic = topicIndices[i];
weight = ((topicCounts[i] + beta) / (tokensPerTopic[topic] + betaSum)) * ((oneDocTopicCounts[topic] + alpha[topic]));
topicDistributionSum += weight;
topicDistribution[i] = weight; // index parallel to topicIndices, so the sampled index maps back to a topic
}
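// The weight above is the standard collapsed-Gibbs full conditional for LDA
// (sketch, for reference):
//   p(z_i = t | rest) is proportional to
//     (n_{w|t} + beta) / (n_t + betaSum) * (n_{t|d} + alpha_t)
// where the counts exclude the token currently being resampled (they were
// decremented a few lines above when readjustTopicsAndStats is true).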
// Sample a topic assignment from this distribution
newTopic = topicIndices[random.nextDiscrete (topicDistribution, topicDistributionSum)];
if (readjustTopicsAndStats) {
// Put that new topic into the counts
oneDocTopics[token] = newTopic;
oneDocTopicCounts[newTopic]++;
typeTopicCounts[type].adjustOrPutValue(newTopic, 1, 1);
tokensPerTopic[newTopic]++;
}
}
if (saveStateForAlphaEstimation) {
// Update the document-topic count histogram, for dirichlet estimation
docLengthCounts[ docLen ]++;
for (int topic=0; topic < numTopics; topic++) {
topicDocCounts[topic][ oneDocTopicCounts[topic] ]++;
}
}
}
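/** SparseLDA-style sampler (cf. Yao, Mimno and McCallum, KDD 2009): the sampling mass for each
* token is split into three buckets that can be maintained incrementally --
* smoothingOnlyMass = sum_t alpha_t * beta / (n_t + betaSum),
* topicBetaMass = sum_t beta * n_{t|d} / (n_t + betaSum) over topics present in this document, and
* topicTermMass = sum_t (alpha_t + n_{t|d}) * n_{w|t} / (n_t + betaSum) over topics with a
* non-zero count for the current word type. A uniform draw over the total mass then only
* needs the detailed per-topic scores for whichever bucket it lands in. */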
protected void sampleTopicsForOneDoc (FeatureSequence tokenSequence,
FeatureSequence topicSequence,
boolean shouldSaveState,
boolean readjustTopicsAndStats /* currently ignored */) {
int[] oneDocTopics = topicSequence.getFeatures();
TIntIntHashMap currentTypeTopicCounts;
int type, oldTopic, newTopic;
double topicWeightsSum;
int docLength = tokenSequence.getLength();
// populate topic counts
TIntIntHashMap localTopicCounts = new TIntIntHashMap();
for (int position = 0; position < docLength; position++) {
localTopicCounts.adjustOrPutValue(oneDocTopics[position], 1, 1);
}
// Initialize the topic count/beta sampling bucket
double topicBetaMass = 0.0;
for (int topic: localTopicCounts.keys()) {
int n = localTopicCounts.get(topic);
// initialize the normalization constant for the (B * n_{t|d}) term
topicBetaMass += beta * n / (tokensPerTopic[topic] + betaSum);
// update the coefficients for the non-zero topics
cachedCoefficients[topic] = (alpha[topic] + n) / (tokensPerTopic[topic] + betaSum);
}
double topicTermMass = 0.0;
double[] topicTermScores = new double[numTopics];
int[] topicTermIndices;
int[] topicTermValues;
int i;
double score;
// Iterate over the positions (words) in the document
for (int position = 0; position < docLength; position++) {
type = tokenSequence.getIndexAtPosition(position);
oldTopic = oneDocTopics[position];
currentTypeTopicCounts = typeTopicCounts[type];
assert(currentTypeTopicCounts.get(oldTopic) >= 0);
// Remove this token from all counts.
// Note that we actually want to remove the key if it goes
// to zero, not set it to 0.
if (currentTypeTopicCounts.get(oldTopic) == 1) {
currentTypeTopicCounts.remove(oldTopic);
}
else {
currentTypeTopicCounts.adjustValue(oldTopic, -1);
}
smoothingOnlyMass -= alpha[oldTopic] * beta /
(tokensPerTopic[oldTopic] + betaSum);
topicBetaMass -= beta * localTopicCounts.get(oldTopic) /
(tokensPerTopic[oldTopic] + betaSum);
if (localTopicCounts.get(oldTopic) == 1) {
localTopicCounts.remove(oldTopic);
}
else {
localTopicCounts.adjustValue(oldTopic, -1);
}
tokensPerTopic[oldTopic]--;
smoothingOnlyMass += alpha[oldTopic] * beta /
(tokensPerTopic[oldTopic] + betaSum);
topicBetaMass += beta * localTopicCounts.get(oldTopic) /
(tokensPerTopic[oldTopic] + betaSum);
cachedCoefficients[oldTopic] =
(alpha[oldTopic] + localTopicCounts.get(oldTopic)) /
(tokensPerTopic[oldTopic] + betaSum);
topicTermMass = 0.0;
topicTermIndices = currentTypeTopicCounts.keys();
topicTermValues = currentTypeTopicCounts.getValues();
for (i=0; i < topicTermIndices.length; i++) {
int topic = topicTermIndices[i];
score =
cachedCoefficients[topic] * topicTermValues[i];
// ((alpha[topic] + localTopicCounts.get(topic)) *
// topicTermValues[i]) /
// (tokensPerTopic[topic] + betaSum);
// Note: I tried only doing this next bit if
// score > 0, but it didn't make any difference,
// at least in the first few iterations.
topicTermMass += score;
topicTermScores[i] = score;
// topicTermIndices[i] = topic;
}
// indicate that this is the last topic
// topicTermIndices[i] = -1;
double sample = random.nextUniform() * (smoothingOnlyMass + topicBetaMass + topicTermMass);
double origSample = sample;
// Make sure it actually gets set
newTopic = -1;
if (sample < topicTermMass) {
//topicTermCount++;
i = -1;
while (sample > 0) {
i++;
sample -= topicTermScores[i];
}
newTopic = topicTermIndices[i];
}
else {
sample -= topicTermMass;
if (sample < topicBetaMass) {
//betaTopicCount++;
sample /= beta;
topicTermIndices = localTopicCounts.keys();
topicTermValues = localTopicCounts.getValues();
for (i=0; i < topicTermIndices.length; i++) {
newTopic = topicTermIndices[i];
sample -= topicTermValues[i] /
(tokensPerTopic[newTopic] + betaSum);
if (sample <= 0.0) {
break;
}
}
}
else {
//smoothingOnlyCount++;
sample -= topicBetaMass;
sample /= beta;
for (int topic = 0; topic < numTopics; topic++) {
sample -= alpha[topic] /
(tokensPerTopic[topic] + betaSum);
if (sample <= 0.0) {
newTopic = topic;
break;
}
}
}
}
if (newTopic == -1) {
System.err.println("LDAHyper sampling error: "+ origSample + " " + sample + " " + smoothingOnlyMass + " " +
topicBetaMass + " " + topicTermMass);
newTopic = numTopics-1; // TODO is this appropriate
//throw new IllegalStateException ("LDAHyper: New topic not sampled.");
}
//assert(newTopic != -1);
// Put that new topic into the counts
oneDocTopics[position] = newTopic;
currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
smoothingOnlyMass -= alpha[newTopic] * beta /
(tokensPerTopic[newTopic] + betaSum);
topicBetaMass -= beta * localTopicCounts.get(newTopic) /
(tokensPerTopic[newTopic] + betaSum);
localTopicCounts.adjustOrPutValue(newTopic, 1, 1);
tokensPerTopic[newTopic]++;
// update the coefficients for the non-zero topics
cachedCoefficients[newTopic] =
(alpha[newTopic] + localTopicCounts.get(newTopic)) /
(tokensPerTopic[newTopic] + betaSum);
smoothingOnlyMass += alpha[newTopic] * beta /
(tokensPerTopic[newTopic] + betaSum);
topicBetaMass += beta * localTopicCounts.get(newTopic) /
(tokensPerTopic[newTopic] + betaSum);
assert(currentTypeTopicCounts.get(newTopic) >= 0);
}
// Clean up our mess: reset the coefficients to values with only
// smoothing. The next doc will update its own non-zero topics...
for (int topic: localTopicCounts.keys()) {
cachedCoefficients[topic] =
alpha[topic] / (tokensPerTopic[topic] + betaSum);
}
if (shouldSaveState) {
// Update the document-topic count histogram,
// for dirichlet estimation
docLengthCounts[ docLength ]++;
for (int topic: localTopicCounts.keys()) {
topicDocCounts[topic][ localTopicCounts.get(topic) ]++;
}
}
}
public IDSorter[] getSortedTopicWords(int topic) {
IDSorter[] sortedTypes = new IDSorter[ numTypes ];
for (int type = 0; type < numTypes; type++)
sortedTypes[type] = new IDSorter(type, typeTopicCounts[type].get(topic));
Arrays.sort(sortedTypes);
return sortedTypes;
}
public void printTopWords (File file, int numWords, boolean useNewLines) throws IOException {
PrintStream out = new PrintStream (file);
printTopWords(out, numWords, useNewLines);
out.close();
}
// TreeSet implementation is ~70x faster than RankedFeatureVector -DM
public void printTopWords (PrintStream out, int numWords, boolean usingNewLines) {
for (int topic = 0; topic < numTopics; topic++) {
TreeSet<IDSorter> sortedWords = new TreeSet<IDSorter>();
for (int type = 0; type < numTypes; type++) {
if (typeTopicCounts[type].containsKey(topic)) {
sortedWords.add(new IDSorter(type, typeTopicCounts[type].get(topic)));
}
}
if (usingNewLines) {
out.println ("Topic " + topic);
int word = 1;
Iterator<IDSorter> iterator = sortedWords.iterator();
while (iterator.hasNext() && word < numWords) {
IDSorter info = iterator.next();
out.println(alphabet.lookupObject(info.getID()) + "\t" +
(int) info.getWeight());
word++;
}
}
else {
out.print (topic + "\t" + formatter.format(alpha[topic]) + "\t" + tokensPerTopic[topic] + "\t");
int word = 1;
Iterator<IDSorter> iterator = sortedWords.iterator();
while (iterator.hasNext() && word < numWords) {
IDSorter info = iterator.next();
out.print(alphabet.lookupObject(info.getID()) + " ");
word++;
}
out.println();
}
}
}
public void topicXMLReport (PrintWriter out, int numWords) {
out.println("<?xml version='1.0' ?>");
out.println("<topicModel>");
for (int topic = 0; topic < numTopics; topic++) {
out.println(" <topic id='" + topic + "' alpha='" + alpha[topic] +
"' totalTokens='" + tokensPerTopic[topic] + "'>");
TreeSet<IDSorter> sortedWords = new TreeSet<IDSorter>();
for (int type = 0; type < numTypes; type++) {
if (typeTopicCounts[type].containsKey(topic)) {
sortedWords.add(new IDSorter(type, typeTopicCounts[type].get(topic)));
}
}
int word = 1;
Iterator<IDSorter> iterator = sortedWords.iterator();
while (iterator.hasNext() && word < numWords) {
IDSorter info = iterator.next();
out.println(" <word rank='" + word + "'>" +
alphabet.lookupObject(info.getID()) +
"</word>");
word++;
}
out.println(" </topic>");
}
out.println("</topicModel>");
}
public void topicXMLReportPhrases (PrintStream out, int numWords) {
int numTopics = this.getNumTopics();
gnu.trove.TObjectIntHashMap<String>[] phrases = new gnu.trove.TObjectIntHashMap[numTopics];
Alphabet alphabet = this.getAlphabet();
// Get counts of phrases
for (int ti = 0; ti < numTopics; ti++)
phrases[ti] = new gnu.trove.TObjectIntHashMap<String>();
for (int di = 0; di < this.getData().size(); di++) {
LDAHyper.Topication t = this.getData().get(di);
Instance instance = t.instance;
FeatureSequence fvs = (FeatureSequence) instance.getData();
boolean withBigrams = false;
if (fvs instanceof FeatureSequenceWithBigrams) withBigrams = true;
int prevtopic = -1;
int prevfeature = -1;
int topic = -1;
StringBuffer sb = null;
int feature = -1;
int doclen = fvs.size();
for (int pi = 0; pi < doclen; pi++) {
feature = fvs.getIndexAtPosition(pi);
topic = this.getData().get(di).topicSequence.getIndexAtPosition(pi);
if (topic == prevtopic && (!withBigrams || ((FeatureSequenceWithBigrams)fvs).getBiIndexAtPosition(pi) != -1)) {
if (sb == null)
sb = new StringBuffer (alphabet.lookupObject(prevfeature).toString() + " " + alphabet.lookupObject(feature));
else {
sb.append (" ");
sb.append (alphabet.lookupObject(feature));
}
} else if (sb != null) {
String sbs = sb.toString();
//System.out.println ("phrase:"+sbs);
if (phrases[prevtopic].get(sbs) == 0)
phrases[prevtopic].put(sbs,0);
phrases[prevtopic].increment(sbs);
prevtopic = prevfeature = -1;
sb = null;
} else {
prevtopic = topic;
prevfeature = feature;
}
}
}
// phrases[] now filled with counts
// Now start printing the XML
out.println("<?xml version='1.0' ?>");
out.println("<topics>");
double[] probs = new double[alphabet.size()];
for (int ti = 0; ti < numTopics; ti++) {
out.print(" <topic id=\"" + ti + "\" alpha=\"" + alpha[ti] +
"\" totalTokens=\"" + tokensPerTopic[ti] + "\" ");
// For gathering <term> and <phrase> output temporarily
// so that we can get topic-title information before printing it to "out".
ByteArrayOutputStream bout = new ByteArrayOutputStream();
PrintStream pout = new PrintStream (bout);
// For holding candidate topic titles
AugmentableFeatureVector titles = new AugmentableFeatureVector (new Alphabet());
// Print words
for (int type = 0; type < alphabet.size(); type++)
probs[type] = this.getCountFeatureTopic(type, ti) / (double)this.getCountTokensPerTopic(ti);
RankedFeatureVector rfv = new RankedFeatureVector (alphabet, probs);
for (int ri = 0; ri < numWords; ri++) {
int fi = rfv.getIndexAtRank(ri);
pout.println (" <term weight=\""+probs[fi]+"\" count=\""+this.getCountFeatureTopic(fi,ti)+"\">"+alphabet.lookupObject(fi)+ "</term>");
if (ri < 20) // consider top 20 individual words as candidate titles
titles.add(alphabet.lookupObject(fi), this.getCountFeatureTopic(fi,ti));
}
// Print phrases
Object[] keys = phrases[ti].keys();
int[] values = phrases[ti].getValues();
double counts[] = new double[keys.length];
for (int i = 0; i < counts.length; i++) counts[i] = values[i];
double countssum = MatrixOps.sum (counts);
Alphabet alph = new Alphabet(keys);
rfv = new RankedFeatureVector (alph, counts);
//out.println ("topic "+ti);
int max = rfv.numLocations() < numWords ? rfv.numLocations() : numWords;
//System.out.println ("topic "+ti+" numPhrases="+rfv.numLocations());
for (int ri = 0; ri < max; ri++) {
int fi = rfv.getIndexAtRank(ri);
pout.println (" <phrase weight=\""+counts[fi]/countssum+"\" count=\""+values[fi]+"\">"+alph.lookupObject(fi)+ "</phrase>");
// Any phrase count less than 20 is simply unreliable
if (ri < 20 && values[fi] > 20)
titles.add(alph.lookupObject(fi), 100*values[fi]); // prefer phrases with a factor of 100
}
// Select candidate titles
StringBuffer titlesStringBuffer = new StringBuffer();
rfv = new RankedFeatureVector (titles.getAlphabet(), titles);
int numTitles = 10;
for (int ri = 0; ri < numTitles && ri < rfv.numLocations(); ri++) {
// Don't add redundant titles
if (titlesStringBuffer.indexOf(rfv.getObjectAtRank(ri).toString()) == -1) {
titlesStringBuffer.append (rfv.getObjectAtRank(ri));
if (ri < numTitles-1)
titlesStringBuffer.append (", ");
} else
numTitles++;
}
out.println("titles=\"" + titlesStringBuffer.toString() + "\">");
out.print(pout.toString());
out.println(" </topic>");
}
out.println("</topics>");
}
public void printDocumentTopics (File f) throws IOException {
printDocumentTopics (new PrintWriter (new FileWriter (f) ) );
}
public void printDocumentTopics (PrintWriter pw) {
printDocumentTopics (pw, 0.0, -1);
}
/**
* @param pw A print writer
* @param threshold Only print topics with proportion greater than this number
* @param max Print no more than this many topics
*/
public void printDocumentTopics (PrintWriter pw, double threshold, int max) {
pw.print ("#doc source topic proportion ...\n");
int docLen;
int[] topicCounts = new int[ numTopics ];
IDSorter[] sortedTopics = new IDSorter[ numTopics ];
for (int topic = 0; topic < numTopics; topic++) {
// Initialize the sorters with dummy values
sortedTopics[topic] = new IDSorter(topic, topic);
}
if (max < 0 || max > numTopics) {
max = numTopics;
}
for (int di = 0; di < data.size(); di++) {
LabelSequence topicSequence = (LabelSequence) data.get(di).topicSequence;
int[] currentDocTopics = topicSequence.getFeatures();
pw.print (di); pw.print (' ');
if (data.get(di).instance.getSource() != null) {
pw.print (data.get(di).instance.getSource());
}
else {
pw.print ("null-source");
}
pw.print (' ');
docLen = currentDocTopics.length;
// Count up the tokens
for (int token=0; token < docLen; token++) {
topicCounts[ currentDocTopics[token] ]++;
}
// And normalize
for (int topic = 0; topic < numTopics; topic++) {
sortedTopics[topic].set(topic, (float) topicCounts[topic] / docLen);
}
Arrays.sort(sortedTopics);
for (int i = 0; i < max; i++) {
if (sortedTopics[i].getWeight() < threshold) { break; }
pw.print (sortedTopics[i].getID() + " " +
sortedTopics[i].getWeight() + " ");
}
pw.print (" \n");
Arrays.fill(topicCounts, 0);
}
}
public void printState (File f) throws IOException {
PrintStream out =
new PrintStream(new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(f))));
printState(out);
out.close();
}
public void printState (PrintStream out) {
out.println ("#doc source pos typeindex type topic");
for (int di = 0; di < data.size(); di++) {
FeatureSequence tokenSequence = (FeatureSequence) data.get(di).instance.getData();
LabelSequence topicSequence = (LabelSequence) data.get(di).topicSequence;
String source = "NA";
if (data.get(di).instance.getSource() != null) {
source = data.get(di).instance.getSource().toString();
}
for (int pi = 0; pi < topicSequence.getLength(); pi++) {
int type = tokenSequence.getIndexAtPosition(pi);
int topic = topicSequence.getIndexAtPosition(pi);
out.print(di); out.print(' ');
out.print(source); out.print(' ');
out.print(pi); out.print(' ');
out.print(type); out.print(' ');
out.print(alphabet.lookupObject(type)); out.print(' ');
out.print(topic); out.println();
}
}
}
// Turbo topics
/*
private class CorpusWordCounts {
Alphabet unigramAlphabet;
FeatureCounter unigramCounts = new FeatureCounter(unigramAlphabet);
public CorpusWordCounts(Alphabet alphabet) {
unigramAlphabet = alphabet;
}
private double mylog(double x) { return (x == 0) ? -1000000.0 : Math.log(x); }
// The likelihood ratio significance test
private double significanceTest(int thisUnigramCount, int nextUnigramCount, int nextBigramCount, int nextTotalCount, int minCount) {
if (nextBigramCount < minCount) return -1.0;
assert(nextUnigramCount >= nextBigramCount);
double log_pi_vu = mylog(nextBigramCount) - mylog(thisUnigramCount);
double log_pi_vnu = mylog(nextUnigramCount - nextBigramCount) - mylog(nextTotalCount - nextBigramCount);
double log_pi_v_old = mylog(nextUnigramCount) - mylog(nextTotalCount);
double log_1mp_v = mylog(1 - Math.exp(log_pi_vnu));
double log_1mp_vu = mylog(1 - Math.exp(log_pi_vu));
return 2 * (nextBigramCount * log_pi_vu +
(nextUnigramCount - nextBigramCount) * log_pi_vnu -
nextUnigramCount * log_pi_v_old +
(thisUnigramCount- nextBigramCount) * (log_1mp_vu - log_1mp_v));
}
public int[] significantBigrams(int word) {
}
}
*/
public void write (File f) {
try {
ObjectOutputStream oos = new ObjectOutputStream (new FileOutputStream(f));
oos.writeObject(this);
oos.close();
}
catch (IOException e) {
System.err.println("LDAHyper.write: Exception writing LDAHyper to file " + f + ": " + e);
}
}
public static LDAHyper read (File f) {
LDAHyper lda = null;
try {
ObjectInputStream ois = new ObjectInputStream (new FileInputStream(f));
lda = (LDAHyper) ois.readObject();
lda.initializeTypeTopicCounts(); // To work around a bug in Trove?
ois.close();
}
catch (IOException e) {
System.err.println("Exception reading file " + f + ": " + e);
}
catch (ClassNotFoundException e) {
System.err.println("Exception reading file " + f + ": " + e);
}
return lda;
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
// Instance lists
out.writeObject (data);
out.writeObject (alphabet);
out.writeObject (topicAlphabet);
out.writeInt (numTopics);
out.writeObject (alpha);
out.writeDouble (beta);
out.writeDouble (betaSum);
out.writeDouble(smoothingOnlyMass);
out.writeObject(cachedCoefficients);
out.writeInt(iterationsSoFar);
out.writeInt(numIterations);
out.writeInt(burninPeriod);
out.writeInt(saveSampleInterval);
out.writeInt(optimizeInterval);
out.writeInt(showTopicsInterval);
out.writeInt(wordsPerTopic);
out.writeInt(outputModelInterval);
out.writeObject(outputModelFilename);
out.writeInt(saveStateInterval);
out.writeObject(stateFilename);
out.writeObject(random);
out.writeObject(formatter);
out.writeBoolean(printLogLikelihood);
out.writeObject(docLengthCounts);
out.writeObject(topicDocCounts);
for (int fi = 0; fi < numTypes; fi++)
out.writeObject (typeTopicCounts[fi]);
for (int ti = 0; ti < numTopics; ti++)
out.writeInt (tokensPerTopic[ti]);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int featuresLength;
int version = in.readInt ();
data = (ArrayList<Topication>) in.readObject ();
alphabet = (Alphabet) in.readObject();
topicAlphabet = (LabelAlphabet) in.readObject();
numTopics = in.readInt();
alpha = (double[]) in.readObject();
beta = in.readDouble();
betaSum = in.readDouble();
smoothingOnlyMass = in.readDouble();
cachedCoefficients = (double[]) in.readObject();
iterationsSoFar = in.readInt();
numIterations = in.readInt();
burninPeriod = in.readInt();
saveSampleInterval = in.readInt();
optimizeInterval = in.readInt();
showTopicsInterval = in.readInt();
wordsPerTopic = in.readInt();
outputModelInterval = in.readInt();
outputModelFilename = (String) in.readObject();
saveStateInterval = in.readInt();
stateFilename = (String) in.readObject();
random = (Randoms) in.readObject();
formatter = (NumberFormat) in.readObject();
printLogLikelihood = in.readBoolean();
docLengthCounts = (int[]) in.readObject();
topicDocCounts = (int[][]) in.readObject();
int numDocs = data.size();
this.numTypes = alphabet.size();
typeTopicCounts = new TIntIntHashMap[numTypes];
for (int fi = 0; fi < numTypes; fi++)
typeTopicCounts[fi] = (TIntIntHashMap) in.readObject();
tokensPerTopic = new int[numTopics];
for (int ti = 0; ti < numTopics; ti++)
tokensPerTopic[ti] = in.readInt();
}
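/** Mutual information (in bits) between token-level topic assignments and document labels,
* computed as H(topic) + H(label) - H(topic, label) over all tokens. Returns 0 if the
* instances have no target alphabet. */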
public double topicLabelMutualInformation() {
int doc, level, label, topic, token, type;
int[] docTopics;
if (data.get(0).instance.getTargetAlphabet() == null) {
return 0.0;
}
int targetAlphabetSize = data.get(0).instance.getTargetAlphabet().size();
int[][] topicLabelCounts = new int[ numTopics ][ targetAlphabetSize ];
int[] topicCounts = new int[ numTopics ];
int[] labelCounts = new int[ targetAlphabetSize ];
int total = 0;
for (doc=0; doc < data.size(); doc++) {
label = data.get(doc).instance.getLabeling().getBestIndex();
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequence;
docTopics = topicSequence.getFeatures();
for (token = 0; token < docTopics.length; token++) {
topic = docTopics[token];
topicLabelCounts[ topic ][ label ]++;
topicCounts[topic]++;
labelCounts[label]++;
total++;
}
}
/* // This block will print out the best topics for each label
IDSorter[] wp = new IDSorter[numTypes];
for (topic = 0; topic < numTopics; topic++) {
for (type = 0; type < numTypes; type++) {
wp[type] = new IDSorter (type, (((double) typeTopicCounts[type][topic]) /
tokensPerTopic[topic]));
}
Arrays.sort (wp);
StringBuffer terms = new StringBuffer();
for (int i = 0; i < 8; i++) {
terms.append(instances.getDataAlphabet().lookupObject(wp[i].id));
terms.append(" ");
}
System.out.println(terms);
for (label = 0; label < topicLabelCounts[topic].length; label++) {
System.out.println(topicLabelCounts[ topic ][ label ] + "\t" +
instances.getTargetAlphabet().lookupObject(label));
}
System.out.println();
}
*/
double topicEntropy = 0.0;
double labelEntropy = 0.0;
double jointEntropy = 0.0;
double p;
double log2 = Math.log(2);
for (topic = 0; topic < topicCounts.length; topic++) {
if (topicCounts[topic] == 0) { continue; }
p = (double) topicCounts[topic] / total;
topicEntropy -= p * Math.log(p) / log2;
}
for (label = 0; label < labelCounts.length; label++) {
if (labelCounts[label] == 0) { continue; }
p = (double) labelCounts[label] / total;
labelEntropy -= p * Math.log(p) / log2;
}
for (topic = 0; topic < topicCounts.length; topic++) {
for (label = 0; label < labelCounts.length; label++) {
if (topicLabelCounts[ topic ][ label ] == 0) { continue; }
p = (double) topicLabelCounts[ topic ][ label ] / total;
jointEntropy -= p * Math.log(p) / log2;
}
}
return topicEntropy + labelEntropy - jointEntropy;
}
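/** Estimate held-out log likelihood by empirical likelihood: draw numSamples topic
* distributions from the Dirichlet(alpha) prior, form the implied multinomial over words
* for each draw, score every testing document under each multinomial, and average the
* per-document likelihoods over draws in log space. */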
public double empiricalLikelihood(int numSamples, InstanceList testing) {
double[][] likelihoods = new double[ testing.size() ][ numSamples ];
double[] multinomial = new double[numTypes];
double[] topicDistribution, currentSample, currentWeights;
Dirichlet topicPrior = new Dirichlet(alpha);
int sample, doc, topic, type, token, seqLen;
FeatureSequence fs;
for (sample = 0; sample < numSamples; sample++) {
topicDistribution = topicPrior.nextDistribution();
Arrays.fill(multinomial, 0.0);
for (topic = 0; topic < numTopics; topic++) {
for (type=0; type<numTypes; type++) {
multinomial[type] +=
topicDistribution[topic] *
(beta + typeTopicCounts[type].get(topic)) /
(betaSum + tokensPerTopic[topic]);
}
}
// Convert to log probabilities
for (type=0; type<numTypes; type++) {
assert(multinomial[type] > 0.0);
multinomial[type] = Math.log(multinomial[type]);
}
for (doc=0; doc<testing.size(); doc++) {
fs = (FeatureSequence) testing.get(doc).getData();
seqLen = fs.getLength();
for (token = 0; token < seqLen; token++) {
type = fs.getIndexAtPosition(token);
// Adding this check since testing instances may
// have types not found in training instances,
// as pointed out by Steven Bethard.
if (type < numTypes) {
likelihoods[doc][sample] += multinomial[type];
}
}
}
}
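// Average the per-document likelihoods over samples in log space, subtracting the
// per-document max (log-sum-exp trick) to avoid underflow.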
double averageLogLikelihood = 0.0;
double logNumSamples = Math.log(numSamples);
for (doc=0; doc<testing.size(); doc++) {
double max = Double.NEGATIVE_INFINITY;
for (sample = 0; sample < numSamples; sample++) {
if (likelihoods[doc][sample] > max) {
max = likelihoods[doc][sample];
}
}
double sum = 0.0;
for (sample = 0; sample < numSamples; sample++) {
sum += Math.exp(likelihoods[doc][sample] - max);
}
averageLogLikelihood += Math.log(sum) + max - logNumSamples;
}
return averageLogLikelihood;
}
public double modelLogLikelihood() {
double logLikelihood = 0.0;
int nonZeroTopics;
// The likelihood of the model is a combination of a
// Dirichlet-multinomial for the words in each topic
// and a Dirichlet-multinomial for the topics in each
// document.
// The likelihood function of a dirichlet multinomial is
// Gamma( sum_i alpha_i ) prod_i Gamma( alpha_i + N_i )
// prod_i Gamma( alpha_i ) Gamma( sum_i (alpha_i + N_i) )
// So the log likelihood is
// logGamma ( sum_i alpha_i ) - logGamma ( sum_i (alpha_i + N_i) ) +
// sum_i [ logGamma( alpha_i + N_i) - logGamma( alpha_i ) ]
// Do the documents first
int[] topicCounts = new int[numTopics];
double[] topicLogGammas = new double[numTopics];
int[] docTopics;
for (int topic=0; topic < numTopics; topic++) {
topicLogGammas[ topic ] = Dirichlet.logGammaStirling( alpha[topic] );
}
for (int doc=0; doc < data.size(); doc++) {
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequence;
docTopics = topicSequence.getFeatures();
for (int token=0; token < docTopics.length; token++) {
topicCounts[ docTopics[token] ]++;
}
for (int topic=0; topic < numTopics; topic++) {
if (topicCounts[topic] > 0) {
logLikelihood += (Dirichlet.logGammaStirling(alpha[topic] + topicCounts[topic]) -
topicLogGammas[ topic ]);
}
}
// subtract the (count + parameter) sum term
logLikelihood -= Dirichlet.logGammaStirling(alphaSum + docTopics.length);
Arrays.fill(topicCounts, 0);
}
// add the parameter sum term
logLikelihood += data.size() * Dirichlet.logGammaStirling(alphaSum);
// And the topics
// Count the number of type-topic pairs
int nonZeroTypeTopics = 0;
for (int type=0; type < numTypes; type++) {
int[] usedTopics = typeTopicCounts[type].keys();
for (int topic : usedTopics) {
int count = typeTopicCounts[type].get(topic);
if (count > 0) {
nonZeroTypeTopics++;
logLikelihood +=
Dirichlet.logGammaStirling(beta + count);
}
}
}
for (int topic=0; topic < numTopics; topic++) {
logLikelihood -=
Dirichlet.logGammaStirling( betaSum +
tokensPerTopic[ topic ] );
}
// The per-topic word Dirichlet is over the vocabulary, so its normalizer uses
// betaSum = beta * numTypes, and the logGamma(betaSum) constant appears once per topic.
// logGamma(beta) terms for zero-count type/topic pairs cancel, so only non-zero pairs are subtracted.
logLikelihood +=
(Dirichlet.logGammaStirling(betaSum) * numTopics) -
(Dirichlet.logGammaStirling(beta) * nonZeroTypeTopics);
return logLikelihood;
}
// Recommended to use mallet/bin/vectors2topics instead.
public static void main (String[] args) throws IOException {
InstanceList training = InstanceList.load (new File(args[0]));
int numTopics = args.length > 1 ? Integer.parseInt(args[1]) : 200;
InstanceList testing =
args.length > 2 ? InstanceList.load (new File(args[2])) : null;
LDAHyper lda = new LDAHyper (numTopics, 50.0, 0.01);
lda.printLogLikelihood = true;
lda.setTopicDisplay(50,7);
lda.addInstances(training);
lda.estimate();
}
}
| 47,240 | 32.081933 | 143 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/DMRTopicModel.java | package cc.mallet.topics;
import cc.mallet.optimize.LimitedMemoryBFGS;
import cc.mallet.types.*;
import cc.mallet.classify.MaxEnt;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.Noop;
import gnu.trove.TIntIntHashMap;
import java.io.IOException;
import java.io.PrintStream;
import java.io.File;
public class DMRTopicModel extends LDAHyper {
MaxEnt dmrParameters = null;
int numFeatures;
int defaultFeatureIndex;
Pipe parameterPipe = null;
double[][] alphaCache;
double[] alphaSumCache;
public DMRTopicModel(int numberOfTopics) {
super(numberOfTopics);
}
public void estimate (int iterationsThisRound) throws IOException {
numFeatures = data.get(0).instance.getTargetAlphabet().size() + 1;
defaultFeatureIndex = numFeatures - 1;
int numDocs = data.size(); // TODO consider beginning by sub-sampling?
alphaCache = new double[numDocs][numTopics];
alphaSumCache = new double[numDocs];
long startTime = System.currentTimeMillis();
int maxIteration = iterationsSoFar + iterationsThisRound;
for ( ; iterationsSoFar <= maxIteration; iterationsSoFar++) {
long iterationStart = System.currentTimeMillis();
if (showTopicsInterval != 0 && iterationsSoFar != 0 && iterationsSoFar % showTopicsInterval == 0) {
System.out.println();
printTopWords (System.out, wordsPerTopic, false);
}
if (saveStateInterval != 0 && iterationsSoFar % saveStateInterval == 0) {
this.printState(new File(stateFilename + '.' + iterationsSoFar + ".gz"));
}
if (iterationsSoFar > burninPeriod && optimizeInterval != 0 &&
iterationsSoFar % optimizeInterval == 0) {
// Train regression parameters
learnParameters();
}
// Loop over every document in the corpus
for (int doc = 0; doc < numDocs; doc++) {
FeatureSequence tokenSequence = (FeatureSequence) data.get(doc).instance.getData();
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequence;
if (dmrParameters != null) {
// set appropriate Alpha parameters
setAlphas(data.get(doc).instance);
}
sampleTopicsForOneDoc (tokenSequence, topicSequence,
false, false);
}
long ms = System.currentTimeMillis() - iterationStart;
if (ms > 1000) {
System.out.print(Math.round(ms / 1000) + "s ");
}
else {
System.out.print(ms + "ms ");
}
if (iterationsSoFar % 10 == 0) {
System.out.println ("<" + iterationsSoFar + "> ");
if (printLogLikelihood) System.out.println (modelLogLikelihood());
}
System.out.flush();
}
long seconds = Math.round((System.currentTimeMillis() - startTime)/1000.0);
long minutes = seconds / 60; seconds %= 60;
long hours = minutes / 60; minutes %= 60;
long days = hours / 24; hours %= 24;
System.out.print ("\nTotal time: ");
if (days != 0) { System.out.print(days); System.out.print(" days "); }
if (hours != 0) { System.out.print(hours); System.out.print(" hours "); }
if (minutes != 0) { System.out.print(minutes); System.out.print(" minutes "); }
System.out.print(seconds); System.out.println(" seconds");
}
/**
* Use only the default features to set the topic prior (use no document features)
*/
public void setAlphas() {
double[] parameters = dmrParameters.getParameters();
alphaSum = 0.0;
smoothingOnlyMass = 0.0;
// Use only the default features to set the topic prior (use no document features)
for (int topic=0; topic < numTopics; topic++) {
alpha[topic] = Math.exp( parameters[ (topic * numFeatures) + defaultFeatureIndex ] );
alphaSum += alpha[topic];
smoothingOnlyMass += alpha[topic] * beta / (tokensPerTopic[topic] + betaSum);
cachedCoefficients[topic] = alpha[topic] / (tokensPerTopic[topic] + betaSum);
}
}
/** This method sets the alphas for a hypothetical "document" that contains
* a single non-default feature.
*/
public void setAlphas(int featureIndex) {
double[] parameters = dmrParameters.getParameters();
alphaSum = 0.0;
smoothingOnlyMass = 0.0;
// Use the default feature plus the single specified feature to set the topic prior
for (int topic=0; topic < numTopics; topic++) {
alpha[topic] = Math.exp(parameters[ (topic * numFeatures) + featureIndex ] +
parameters[ (topic * numFeatures) + defaultFeatureIndex ] );
alphaSum += alpha[topic];
smoothingOnlyMass += alpha[topic] * beta / (tokensPerTopic[topic] + betaSum);
cachedCoefficients[topic] = alpha[topic] / (tokensPerTopic[topic] + betaSum);
}
}
/**
* Set alpha based on features in an instance
*/
public void setAlphas(Instance instance) {
// we can't use the standard score functions from MaxEnt,
// since our features are currently in the Target.
FeatureVector features = (FeatureVector) instance.getTarget();
if (features == null) { setAlphas(); return; }
double[] parameters = dmrParameters.getParameters();
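// DMR prior: alpha_t = exp(intercept_t + lambda_t . x_d), where x_d is the feature
// vector stored in the instance target and lambda_t is row t of the MaxEnt parameters.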
alphaSum = 0.0;
smoothingOnlyMass = 0.0;
for (int topic = 0; topic < numTopics; topic++) {
alpha[topic] = parameters[topic*numFeatures + defaultFeatureIndex]
+ MatrixOps.rowDotProduct (parameters,
numFeatures,
topic, features,
defaultFeatureIndex,
null);
alpha[topic] = Math.exp(alpha[topic]);
alphaSum += alpha[topic];
smoothingOnlyMass += alpha[topic] * beta / (tokensPerTopic[topic] + betaSum);
cachedCoefficients[topic] = alpha[topic] / (tokensPerTopic[topic] + betaSum);
}
}
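/** Fit the log-linear (MaxEnt) parameters that map document features to per-topic
* Dirichlet parameters, by maximizing the Dirichlet-multinomial likelihood of the
* current topic counts with L-BFGS (see DMROptimizable). */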
public void learnParameters() {
// Create a "fake" pipe with the features in the data and
// a feature vector of topic counts in the target.
if (parameterPipe == null) {
parameterPipe = new Noop();
parameterPipe.setDataAlphabet(data.get(0).instance.getTargetAlphabet());
parameterPipe.setTargetAlphabet(topicAlphabet);
}
InstanceList parameterInstances = new InstanceList(parameterPipe);
if (dmrParameters == null) {
dmrParameters = new MaxEnt(parameterPipe, new double[numFeatures * numTopics]);
}
for (int doc=0; doc < data.size(); doc++) {
if (data.get(doc).instance.getTarget() == null) {
continue;
}
FeatureCounter counter = new FeatureCounter(topicAlphabet);
for (int topic : data.get(doc).topicSequence.getFeatures()) {
counter.increment(topic);
}
// Put the real target in the data field, and the
// topic counts in the target field
parameterInstances.add( new Instance(data.get(doc).instance.getTarget(), counter.toFeatureVector(), null, null) );
}
DMROptimizable optimizable = new DMROptimizable(parameterInstances, dmrParameters);
optimizable.setRegularGaussianPriorVariance(0.5);
optimizable.setInterceptGaussianPriorVariance(100.0);
LimitedMemoryBFGS optimizer = new LimitedMemoryBFGS(optimizable);
// Optimize once
try {
optimizer.optimize();
} catch (IllegalArgumentException e) {
// step size too small
}
// Restart with a fresh initialization to improve likelihood
try {
optimizer.optimize();
} catch (IllegalArgumentException e) {
// step size too small
}
dmrParameters = optimizable.getClassifier();
for (int doc=0; doc < data.size(); doc++) {
Instance instance = data.get(doc).instance;
FeatureSequence tokens = (FeatureSequence) instance.getData();
if (instance.getTarget() == null) { continue; }
int numTokens = tokens.getLength();
// This sets alpha[] and alphaSum
setAlphas(instance);
// Now cache alpha values
for (int topic=0; topic < numTopics; topic++) {
alphaCache[doc][topic] = alpha[topic];
}
alphaSumCache[doc] = alphaSum;
}
}
public void printTopWords (PrintStream out, int numWords, boolean usingNewLines) {
if (dmrParameters != null) { setAlphas(); }
super.printTopWords(out, numWords, usingNewLines);
}
public void writeParameters(File parameterFile) throws IOException {
if (dmrParameters != null) {
PrintStream out = new PrintStream(parameterFile);
dmrParameters.print(out);
out.close();
}
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
public static void main (String[] args) throws IOException {
InstanceList training = InstanceList.load (new File(args[0]));
int numTopics = args.length > 1 ? Integer.parseInt(args[1]) : 200;
InstanceList testing =
args.length > 2 ? InstanceList.load (new File(args[2])) : null;
DMRTopicModel lda = new DMRTopicModel (numTopics);
lda.setOptimizeInterval(100);
lda.setTopicDisplay(100, 10);
lda.addInstances(training);
lda.estimate();
lda.writeParameters(new File("dmr.parameters"));
lda.printState(new File("dmr.state.gz"));
}
} | 9,697 | 32.790941 | 126 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/NPTopicModel.java | /* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.topics;
import java.util.*;
import java.util.logging.*;
import java.util.zip.*;
import java.io.*;
import java.text.NumberFormat;
import cc.mallet.topics.*;
import cc.mallet.types.*;
import cc.mallet.util.*;
import gnu.trove.*;
/**
* A non-parametric topic model that uses the "minimal path" assumption
* to reduce bookkeeping.
*
* @author David Mimno
*/
public class NPTopicModel implements Serializable {
private static Logger logger = MalletLogger.getLogger(NPTopicModel.class.getName());
// the training instances and their topic assignments
protected ArrayList<TopicAssignment> data;
// the alphabet for the input data
protected Alphabet alphabet;
// the alphabet for the topics
protected LabelAlphabet topicAlphabet;
// The largest topic ID seen so far
protected int maxTopic;
// The current number of topics
protected int numTopics;
// The size of the vocabulary
protected int numTypes;
// Prior parameters
protected double alpha;
protected double gamma;
protected double beta; // Prior on per-topic multinomial distribution over words
protected double betaSum;
public static final double DEFAULT_BETA = 0.01;
// Statistics needed for sampling.
protected TIntIntHashMap[] typeTopicCounts; // indexed by <feature index, topic index>
protected TIntIntHashMap tokensPerTopic; // indexed by <topic index>
// The number of documents that contain at least one
// token with a given topic.
protected TIntIntHashMap docsPerTopic;
protected int totalDocTopics = 0;
public int showTopicsInterval = 50;
public int wordsPerTopic = 10;
protected Randoms random;
protected NumberFormat formatter;
protected boolean printLogLikelihood = false;
/** @param alpha this parameter balances the local document topic counts with
* the global distribution over topics.
* @param gamma this parameter is the weight on a completely new, never-before-seen topic
* in the global distribution.
* @param beta this parameter controls the variability of the topic-word distributions
*/
public NPTopicModel (double alpha, double gamma, double beta) {
this.data = new ArrayList<TopicAssignment>();
this.topicAlphabet = AlphabetFactory.labelAlphabetOfSize(1);
this.alpha = alpha;
this.gamma = gamma;
this.beta = beta;
this.random = new Randoms();
tokensPerTopic = new TIntIntHashMap();
docsPerTopic = new TIntIntHashMap();
formatter = NumberFormat.getInstance();
formatter.setMaximumFractionDigits(5);
logger.info("Non-Parametric LDA");
}
public void setTopicDisplay(int interval, int n) {
this.showTopicsInterval = interval;
this.wordsPerTopic = n;
}
public void setRandomSeed(int seed) {
random = new Randoms(seed);
}
public void addInstances (InstanceList training, int initialTopics) {
alphabet = training.getDataAlphabet();
numTypes = alphabet.size();
betaSum = beta * numTypes;
typeTopicCounts = new TIntIntHashMap[numTypes];
for (int type=0; type < numTypes; type++) {
typeTopicCounts[type] = new TIntIntHashMap();
}
numTopics = initialTopics;
int doc = 0;
for (Instance instance : training) {
doc++;
TIntIntHashMap topicCounts = new TIntIntHashMap();
FeatureSequence tokens = (FeatureSequence) instance.getData();
LabelSequence topicSequence =
new LabelSequence(topicAlphabet, new int[ tokens.size() ]);
int[] topics = topicSequence.getFeatures();
for (int position = 0; position < tokens.size(); position++) {
int topic = random.nextInt(numTopics);
tokensPerTopic.adjustOrPutValue(topic, 1, 1);
topics[position] = topic;
// Keep track of the number of docs with at least one token
// in a given topic.
if (! topicCounts.containsKey(topic)) {
docsPerTopic.adjustOrPutValue(topic, 1, 1);
totalDocTopics++;
topicCounts.put(topic, 1);
}
else {
topicCounts.adjustValue(topic, 1);
}
int type = tokens.getIndexAtPosition(position);
typeTopicCounts[type].adjustOrPutValue(topic, 1, 1);
}
TopicAssignment t = new TopicAssignment (instance, topicSequence);
data.add (t);
}
maxTopic = numTopics - 1;
}
public void sample (int iterations) throws IOException {
for (int iteration = 1; iteration <= iterations; iteration++) {
long iterationStart = System.currentTimeMillis();
// Loop over every document in the corpus
for (int doc = 0; doc < data.size(); doc++) {
FeatureSequence tokenSequence =
(FeatureSequence) data.get(doc).instance.getData();
LabelSequence topicSequence =
(LabelSequence) data.get(doc).topicSequence;
sampleTopicsForOneDoc (tokenSequence, topicSequence);
}
long elapsedMillis = System.currentTimeMillis() - iterationStart;
logger.info(iteration + "\t" + elapsedMillis + "ms\t" + numTopics);
// Occasionally print more information
if (showTopicsInterval != 0 && iteration % showTopicsInterval == 0) {
logger.info("<" + iteration + "> #Topics: " + numTopics + "\n" +
topWords (wordsPerTopic));
}
}
}
protected void sampleTopicsForOneDoc (FeatureSequence tokenSequence,
FeatureSequence topicSequence) {
int[] topics = topicSequence.getFeatures();
TIntIntHashMap currentTypeTopicCounts;
int type, oldTopic, newTopic;
double topicWeightsSum;
int docLength = tokenSequence.getLength();
TIntIntHashMap localTopicCounts = new TIntIntHashMap();
// populate topic counts
for (int position = 0; position < docLength; position++) {
localTopicCounts.adjustOrPutValue(topics[position], 1, 1);
}
double score, sum;
double[] topicTermScores = new double[numTopics + 1];
// Store a list of all the topics that currently exist.
int[] allTopics = docsPerTopic.keys();
// Iterate over the positions (words) in the document
for (int position = 0; position < docLength; position++) {
type = tokenSequence.getIndexAtPosition(position);
oldTopic = topics[position];
// Grab the relevant row from our two-dimensional array
currentTypeTopicCounts = typeTopicCounts[type];
// Remove this token from all counts.
int currentCount = localTopicCounts.get(oldTopic);
// Was this the only token of this topic in the doc?
if (currentCount == 1) {
localTopicCounts.remove(oldTopic);
// Was this the only doc with this topic?
int docCount = docsPerTopic.get(oldTopic);
if (docCount == 1) {
// This should be the very last token
assert(tokensPerTopic.get(oldTopic) == 1);
// Get rid of the topic
docsPerTopic.remove(oldTopic);
totalDocTopics--;
tokensPerTopic.remove(oldTopic);
numTopics--;
allTopics = docsPerTopic.keys();
topicTermScores = new double[numTopics + 1];
}
else {
// This is the last in the doc, but the topic still exists
docsPerTopic.adjustValue(oldTopic, -1);
totalDocTopics--;
tokensPerTopic.adjustValue(oldTopic, -1);
}
}
else {
// There is at least one other token in this doc
// with this topic.
localTopicCounts.adjustValue(oldTopic, -1);
tokensPerTopic.adjustValue(oldTopic, -1);
}
if (currentTypeTopicCounts.get(oldTopic) == 1) {
currentTypeTopicCounts.remove(oldTopic);
}
else {
currentTypeTopicCounts.adjustValue(oldTopic, -1);
}
// Now calculate and add up the scores for each topic for this word
sum = 0.0;
// First do the topics that currently exist
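// Under the minimal-path assumption, each existing topic t gets weight
// (n_{t|d} + alpha * m_t / (M + gamma)) * (n_{w|t} + beta) / (n_t + betaSum),
// where m_t is the number of documents currently using topic t and M is the
// total number of document/topic pairs.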
for (int i = 0; i < numTopics; i++) {
int topic = allTopics[i];
topicTermScores[i] =
(localTopicCounts.get(topic) +
alpha * (docsPerTopic.get(topic) /
(totalDocTopics + gamma))) *
(currentTypeTopicCounts.get(topic) + beta) /
(tokensPerTopic.get(topic) + betaSum);
sum += topicTermScores[i];
}
// Add the weight for a new topic
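// alpha * gamma / (M + gamma), spread uniformly over the vocabulary (1 / numTypes)
// since a brand-new topic has no word counts yet.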
topicTermScores[numTopics] =
alpha * gamma / ( numTypes * (totalDocTopics + gamma) );
sum += topicTermScores[numTopics];
// Choose a random point between 0 and the sum of all topic scores
double sample = random.nextUniform() * sum;
// Figure out which topic contains that point
newTopic = -1;
int i = -1;
while (sample > 0.0) {
i++;
sample -= topicTermScores[i];
}
if (i < numTopics) {
newTopic = allTopics[i];
topics[position] = newTopic;
currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
tokensPerTopic.adjustValue(newTopic, 1);
if (localTopicCounts.containsKey(newTopic)) {
localTopicCounts.adjustValue(newTopic, 1);
}
else {
// This is not a new topic, but it is new for this doc.
localTopicCounts.put(newTopic, 1);
docsPerTopic.adjustValue(newTopic, 1);
totalDocTopics++;
}
}
else {
// completely new topic: first generate an ID
newTopic = maxTopic + 1;
maxTopic = newTopic;
numTopics++;
topics[position] = newTopic;
localTopicCounts.put(newTopic, 1);
docsPerTopic.put(newTopic, 1);
totalDocTopics++;
currentTypeTopicCounts.put(newTopic, 1);
tokensPerTopic.put(newTopic, 1);
allTopics = docsPerTopic.keys();
topicTermScores = new double[numTopics + 1];
}
}
}
//
// Methods for displaying and saving results
//
public String topWords (int numWords) {
StringBuilder output = new StringBuilder();
IDSorter[] sortedWords = new IDSorter[numTypes];
for (int topic: docsPerTopic.keys()) {
for (int type = 0; type < numTypes; type++) {
sortedWords[type] = new IDSorter(type, typeTopicCounts[type].get(topic));
}
Arrays.sort(sortedWords);
output.append(topic + "\t" + tokensPerTopic.get(topic) + "\t");
for (int i=0; i < numWords; i++) {
if (sortedWords[i].getWeight() < 1.0) {
break;
}
output.append(alphabet.lookupObject(sortedWords[i].getID()) + " ");
}
output.append("\n");
}
return output.toString();
}
public void printState (File f) throws IOException {
PrintStream out =
new PrintStream(new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(f))));
printState(out);
out.close();
}
public void printState (PrintStream out) {
out.println ("#doc source pos typeindex type topic");
for (int doc = 0; doc < data.size(); doc++) {
FeatureSequence tokenSequence = (FeatureSequence) data.get(doc).instance.getData();
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequence;
String source = "NA";
if (data.get(doc).instance.getSource() != null) {
source = data.get(doc).instance.getSource().toString();
}
for (int position = 0; position < topicSequence.getLength(); position++) {
int type = tokenSequence.getIndexAtPosition(position);
int topic = topicSequence.getIndexAtPosition(position);
out.print(doc); out.print(' ');
out.print(source); out.print(' ');
out.print(position); out.print(' ');
out.print(type); out.print(' ');
out.print(alphabet.lookupObject(type)); out.print(' ');
out.print(topic); out.println();
}
}
}
public static void main (String[] args) throws IOException {
InstanceList training = InstanceList.load (new File(args[0]));
int numTopics = args.length > 1 ? Integer.parseInt(args[1]) : 200;
NPTopicModel lda = new NPTopicModel (5.0, 10.0, 0.1);
lda.addInstances(training, numTopics);
lda.sample(1000);
}
}
| 11,846 | 27.478365 | 92 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/DMROptimizable.java | package cc.mallet.topics;
/** This class implements the value and gradient functions for
* Dirichlet-multinomial Regression. See Guimaraes and Lindrooth,
* for a general introduction to DMR,
* and Mimno and McCallum (UAI, 2008) for an application to
* multinomial mixture models.
*/
import cc.mallet.optimize.Optimizable;
import cc.mallet.classify.MaxEnt;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Instance;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.Dirichlet;
import cc.mallet.types.MatrixOps;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.MalletProgressMessageLogger;
import java.util.logging.*;
import java.util.*;
import java.text.NumberFormat;
import java.text.DecimalFormat;
import gnu.trove.TIntIntHashMap;
public class DMROptimizable implements Optimizable.ByGradientValue {
private static Logger logger = MalletLogger.getLogger(DMROptimizable.class.getName());
private static Logger progressLogger = MalletProgressMessageLogger.getLogger(DMROptimizable.class.getName()+"-pl");
MaxEnt classifier;
InstanceList trainingList;
int numGetValueCalls = 0;
int numGetValueGradientCalls = 0;
int numIterations = Integer.MAX_VALUE;
NumberFormat formatter = null;
static final double DEFAULT_GAUSSIAN_PRIOR_VARIANCE = 1;
static final double DEFAULT_LARGE_GAUSSIAN_PRIOR_VARIANCE = 100;
static final double DEFAULT_GAUSSIAN_PRIOR_MEAN = 0.0;
double gaussianPriorMean = DEFAULT_GAUSSIAN_PRIOR_MEAN;
double gaussianPriorVariance = DEFAULT_GAUSSIAN_PRIOR_VARIANCE;
// Allowing the default feature (the base level) to
// fluctuate more freely than the feature parameters leads
// to much better results.
double defaultFeatureGaussianPriorVariance = DEFAULT_LARGE_GAUSSIAN_PRIOR_VARIANCE;
double[] parameters;
double[] cachedGradient;
double cachedValue;
boolean cachedValueStale;
boolean cachedGradientStale;
int numLabels;
int numFeatures;
int defaultFeatureIndex;
public DMROptimizable () {}
public DMROptimizable (InstanceList instances, MaxEnt initialClassifier) {
this.trainingList = instances;
Alphabet alphabet = instances.getDataAlphabet();
Alphabet labelAlphabet = instances.getTargetAlphabet();
this.numLabels = labelAlphabet.size();
// Add one feature for the "default feature".
this.numFeatures = alphabet.size() + 1; // add a spot for the intercept term
//System.out.println("num features: " + numFeatures + " numLabels: " + numLabels);
this.defaultFeatureIndex = numFeatures - 1;
this.parameters = new double [numLabels * numFeatures];
//this.constraints = new double [numLabels * numFeatures];
this.cachedGradient = new double [numLabels * numFeatures];
if (initialClassifier != null) {
this.classifier = initialClassifier;
this.parameters = classifier.getParameters();
this.defaultFeatureIndex = classifier.getDefaultFeatureIndex();
assert (initialClassifier.getInstancePipe() == instances.getPipe());
}
else if (this.classifier == null) {
this.classifier =
new MaxEnt (instances.getPipe(), parameters);
}
formatter = new DecimalFormat("0.###E0");
cachedValueStale = true;
cachedGradientStale = true;
// Initialize the constraints
logger.fine("Number of instances in training list = " + trainingList.size());
for (Instance instance : trainingList) {
FeatureVector multinomialValues = (FeatureVector) instance.getTarget();
if (multinomialValues == null)
continue;
FeatureVector features = (FeatureVector) instance.getData();
assert (features.getAlphabet() == alphabet);
boolean hasNaN = false;
for (int i = 0; i < features.numLocations(); i++) {
if (Double.isNaN(features.valueAtLocation(i))) {
logger.info("NaN for feature " + alphabet.lookupObject(features.indexAtLocation(i)).toString());
hasNaN = true;
}
}
if (hasNaN) {
logger.info("NaN in instance: " + instance.getName());
}
}
//TestMaximizable.testValueAndGradientCurrentParameters (this);
}
/** Set the variance for the default features (aka intercept terms), generally
* larger than the variance for the regular features.
*/
public void setInterceptGaussianPriorVariance(double sigmaSquared) {
this.defaultFeatureGaussianPriorVariance = sigmaSquared;
}
/** Set the variance for regular (non default) features, generally
* smaller than the variance for the default features.
*/
public void setRegularGaussianPriorVariance(double sigmaSquared) {
this.gaussianPriorVariance = sigmaSquared;
}
public MaxEnt getClassifier () { return classifier; }
public double getParameter (int index) {
return parameters[index];
}
public void setParameter (int index, double v) {
cachedValueStale = true;
cachedGradientStale = true;
parameters[index] = v;
}
public int getNumParameters() {
return parameters.length;
}
public void getParameters (double[] buff) {
if (buff == null || buff.length != parameters.length) {
buff = new double [parameters.length];
}
System.arraycopy (parameters, 0, buff, 0, parameters.length);
}
public void setParameters (double [] buff) {
assert (buff != null);
cachedValueStale = true;
cachedGradientStale = true;
if (buff.length != parameters.length)
parameters = new double[buff.length];
System.arraycopy (buff, 0, parameters, 0, buff.length);
}
/** The log probability of the observed count vectors given the features. */
public double getValue () {
if (! cachedValueStale) { return cachedValue; }
numGetValueCalls++;
cachedValue = 0;
// Incorporate likelihood of data
double[] scores = new double[ trainingList.getTargetAlphabet().size() ];
double value = 0.0;
int instanceIndex = 0;
for (Instance instance: trainingList) {
FeatureVector multinomialValues = (FeatureVector) instance.getTarget();
if (multinomialValues == null) { continue; }
//System.out.println("L Now "+inputAlphabet.size()+" regular features.");
// Get the predicted probability of each class
// under the current model parameters
this.classifier.getUnnormalizedClassificationScores(instance, scores);
double sumScores = 0.0;
// Exponentiate the scores
for (int i=0; i<scores.length; i++) {
// Due to underflow, it's very likely that some of these scores will be 0.0.
scores[i] = Math.exp(scores[i]);
sumScores += scores[i];
}
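// scores[] now holds the Dirichlet parameters alpha_k = exp(lambda_k . x_d + intercept_k)
// for this document; the Dirichlet-multinomial log likelihood of the observed topic counts n_k,
// sum_k [ logGamma(alpha_k + n_k) - logGamma(alpha_k) ]
// - [ logGamma(sum_k alpha_k + N) - logGamma(sum_k alpha_k) ], is accumulated below.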
FeatureVector features = (FeatureVector) instance.getData();
// This is really an int, but since FeatureVectors are defined as doubles,
// avoid casting.
double totalLength = 0;
for (int i = 0; i < multinomialValues.numLocations(); i++) {
int label = multinomialValues.indexAtLocation(i);
double count = multinomialValues.valueAtLocation(i);
value += (Dirichlet.logGammaStirling(scores[label] + count) -
Dirichlet.logGammaStirling(scores[label]));
totalLength += count;
}
value -= (Dirichlet.logGammaStirling(sumScores + totalLength) -
Dirichlet.logGammaStirling(sumScores));
// Error Checking:
if (Double.isNaN(value)) {
logger.fine ("DCMMaxEntTrainer: Instance " + instance.getName() +
"has NaN value.");
for (int label: multinomialValues.getIndices()) {
logger.fine ("log(scores)= " + Math.log(scores[label]) +
" scores = " + scores[label]);
}
}
if (Double.isInfinite(value)) {
logger.warning ("Instance " + instance.getSource() +
" has infinite value; skipping value and gradient");
cachedValue -= value;
cachedValueStale = false;
return -value;
}
//System.out.println(value);
cachedValue += value;
instanceIndex++;
}
// Incorporate prior on parameters
double prior = 0;
// The log of a gaussian prior is x^2 / -2sigma^2
for (int label = 0; label < numLabels; label++) {
for (int feature = 0; feature < numFeatures - 1; feature++) {
double param = parameters[label*numFeatures + feature];
prior -= (param - gaussianPriorMean) * (param - gaussianPriorMean) / (2 * gaussianPriorVariance);
}
double param = parameters[label*numFeatures + defaultFeatureIndex];
prior -= (param - gaussianPriorMean) * (param - gaussianPriorMean) /
(2 * defaultFeatureGaussianPriorVariance);
}
double labelProbability = cachedValue;
cachedValue += prior;
cachedValueStale = false;
progressLogger.info ("Value (likelihood=" + formatter.format(labelProbability) +
" prior=" + formatter.format(prior) +
") = " + formatter.format(cachedValue));
return cachedValue;
}
public void getValueGradient (double [] buffer) {
MatrixOps.setAll (cachedGradient, 0.0);
// Incorporate likelihood of data
double[] scores = new double[ trainingList.getTargetAlphabet().size() ];
int instanceIndex = 0;
for (Instance instance: trainingList) {
FeatureVector multinomialValues = (FeatureVector) instance.getTarget();
if (multinomialValues == null) { continue; }
// Get the predicted probability of each class
// under the current model parameters
this.classifier.getUnnormalizedClassificationScores(instance, scores);
double sumScores = 0.0;
// Exponentiate the scores
for (int i=0; i<scores.length; i++) {
// Due to underflow, it's very likely that some of these scores will be 0.0.
scores[i] = Math.exp(scores[i]);
sumScores += scores[i];
}
FeatureVector features = (FeatureVector) instance.getData();
double totalLength = 0;
for (double count : multinomialValues.getValues()) {
totalLength += count;
}
double digammaDifferenceForSums =
Dirichlet.digamma(sumScores + totalLength) -
Dirichlet.digamma(sumScores);
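// Gradient of the Dirichlet-multinomial log likelihood with respect to lambda_{k,f}:
// x_f * alpha_k * [ digamma(alpha_k + n_k) - digamma(alpha_k)
// - (digamma(sum alpha + N) - digamma(sum alpha)) ], where for small counts the
// digamma difference is replaced by the exact harmonic sum 1/alpha_k + ... + 1/(alpha_k + n_k - 1).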
for (int loc = 0; loc < features.numLocations(); loc++) {
int index = features.indexAtLocation(loc);
double value = features.valueAtLocation(loc);
if (value == 0.0) { continue; }
// In a FeatureVector, there's no easy way to say "do you know
// about this id?" so I've broken this into two for loops,
// one for all labels, the other for just the non-zero ones.
for (int label=0; label<numLabels; label++) {
cachedGradient[label * numFeatures + index] -=
value * scores[label] * digammaDifferenceForSums;
}
for (int labelLoc = 0; labelLoc <multinomialValues.numLocations(); labelLoc++) {
int label = multinomialValues.indexAtLocation(labelLoc);
double count = multinomialValues.valueAtLocation(labelLoc);
double diff = 0.0;
if (count < 20) {
for (int i=0; i < count; i++) {
diff += 1 / (scores[label] + i);
}
}
else {
diff = Dirichlet.digamma(scores[label] + count) -
Dirichlet.digamma(scores[label]);
}
cachedGradient[label * numFeatures + index] +=
value * scores[label] * diff;
}
}
// Now add the default feature
for (int label=0; label<numLabels; label++) {
cachedGradient[label * numFeatures + defaultFeatureIndex] -=
scores[label] * digammaDifferenceForSums;
}
for(int labelLoc = 0; labelLoc <multinomialValues.numLocations(); labelLoc++) {
int label = multinomialValues.indexAtLocation(labelLoc);
double count = multinomialValues.valueAtLocation(labelLoc);
double diff = 0.0;
if (count < 20) {
for (int i=0; i < count; i++) {
diff += 1 / (scores[label] + i);
}
}
else {
diff = Dirichlet.digamma(scores[label] + count) -
Dirichlet.digamma(scores[label]);
}
cachedGradient[label * numFeatures + defaultFeatureIndex] +=
scores[label] * diff;
}
}
numGetValueGradientCalls++;
for (int label = 0; label < numLabels; label++) {
for (int feature = 0; feature < numFeatures - 1; feature++) {
double param = parameters[label*numFeatures + feature];
cachedGradient[label * numFeatures + feature] -=
(param - gaussianPriorMean) / gaussianPriorVariance;
}
double param = parameters[label*numFeatures + defaultFeatureIndex];
cachedGradient[label * numFeatures + defaultFeatureIndex] -=
(param - gaussianPriorMean) / defaultFeatureGaussianPriorVariance;
}
// A parameter may be set to -infinity by an external user.
// We set gradient to 0 because the parameter's value can
// never change anyway and it will mess up future calculations
// on the matrix, such as norm().
MatrixOps.substitute (cachedGradient, Double.NEGATIVE_INFINITY, 0.0);
assert (buffer != null && buffer.length == parameters.length);
System.arraycopy (cachedGradient, 0, buffer, 0, cachedGradient.length);
//System.out.println ("DCMMaxEntTrainer gradient infinity norm = "+MatrixOps.infinityNorm(cachedGradient));
}
}
| 12,993 | 29.864608 | 116 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/ParallelTopicModel.java | /* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.topics;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
import java.util.TreeSet;
import java.util.Iterator;
import java.util.concurrent.*;
import java.util.logging.*;
import java.util.zip.*;
import java.io.*;
import java.text.NumberFormat;
import cc.mallet.types.*;
import cc.mallet.topics.TopicAssignment;
import cc.mallet.util.Randoms;
import cc.mallet.util.MalletLogger;
/**
* Simple parallel threaded implementation of LDA,
 * following the UCI approach (Newman, Asuncion, Smyth, Welling,
 * "Distributed Inference for Latent Dirichlet Allocation", NIPS 2007),
 * with the SparseLDA sampling scheme and data structure of
 * Yao, Mimno, McCallum, "Efficient Methods for Topic Model Inference
 * on Streaming Document Collections" (KDD 2009).
*
* @author David Mimno, Andrew McCallum
*/
public class ParallelTopicModel implements Serializable {
protected static Logger logger = MalletLogger.getLogger(ParallelTopicModel.class.getName());
protected ArrayList<TopicAssignment> data; // the training instances and their topic assignments
protected Alphabet alphabet; // the alphabet for the input data
protected LabelAlphabet topicAlphabet; // the alphabet for the topics
protected int numTopics; // Number of topics to be fit
// These values are used to encode type/topic counts as
// count/topic pairs in a single int.
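	// For example, with numTopics = 100 (not a power of two), topicMask is
	//  127 (binary 1111111) and topicBits is 7; a count of 3 for topic 5 is
	//  then stored as (3 << 7) + 5 = 389, and decoded with (x & topicMask)
	//  for the topic and (x >> topicBits) for the count.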
protected int topicMask;
protected int topicBits;
protected int numTypes;
protected int totalTokens;
protected double[] alpha; // Dirichlet(alpha,alpha,...) is the distribution over topics
protected double alphaSum;
protected double beta; // Prior on per-topic multinomial distribution over words
protected double betaSum;
protected boolean usingSymmetricAlpha = false;
public static final double DEFAULT_BETA = 0.01;
protected int[][] typeTopicCounts; // indexed by <feature index, topic index>
protected int[] tokensPerTopic; // indexed by <topic index>
// for dirichlet estimation
protected int[] docLengthCounts; // histogram of document sizes
protected int[][] topicDocCounts; // histogram of document/topic counts, indexed by <topic index, sequence position index>
public int numIterations = 1000;
public int burninPeriod = 200;
public int saveSampleInterval = 10;
public int optimizeInterval = 50;
public int temperingInterval = 0;
public int showTopicsInterval = 50;
public int wordsPerTopic = 7;
protected int saveStateInterval = 0;
protected String stateFilename = null;
protected int saveModelInterval = 0;
protected String modelFilename = null;
protected int randomSeed = -1;
protected NumberFormat formatter;
protected boolean printLogLikelihood = true;
// The number of times each type appears in the corpus
int[] typeTotals;
// The max over typeTotals, used for beta optimization
int maxTypeCount;
int numThreads = 1;
public ParallelTopicModel (int numberOfTopics) {
this (numberOfTopics, numberOfTopics, DEFAULT_BETA);
}
public ParallelTopicModel (int numberOfTopics, double alphaSum, double beta) {
this (newLabelAlphabet (numberOfTopics), alphaSum, beta);
}
private static LabelAlphabet newLabelAlphabet (int numTopics) {
LabelAlphabet ret = new LabelAlphabet();
for (int i = 0; i < numTopics; i++)
ret.lookupIndex("topic"+i);
return ret;
}
public ParallelTopicModel (LabelAlphabet topicAlphabet, double alphaSum, double beta)
{
this.data = new ArrayList<TopicAssignment>();
this.topicAlphabet = topicAlphabet;
this.numTopics = topicAlphabet.size();
if (Integer.bitCount(numTopics) == 1) {
// exact power of 2
topicMask = numTopics - 1;
topicBits = Integer.bitCount(topicMask);
}
else {
// otherwise add an extra bit
topicMask = Integer.highestOneBit(numTopics) * 2 - 1;
topicBits = Integer.bitCount(topicMask);
}
this.alphaSum = alphaSum;
this.alpha = new double[numTopics];
Arrays.fill(alpha, alphaSum / numTopics);
this.beta = beta;
tokensPerTopic = new int[numTopics];
formatter = NumberFormat.getInstance();
formatter.setMaximumFractionDigits(5);
logger.info("Coded LDA: " + numTopics + " topics, " + topicBits + " topic bits, " +
Integer.toBinaryString(topicMask) + " topic mask");
}
public Alphabet getAlphabet() { return alphabet; }
public LabelAlphabet getTopicAlphabet() { return topicAlphabet; }
public int getNumTopics() { return numTopics; }
public ArrayList<TopicAssignment> getData() { return data; }
public void setNumIterations (int numIterations) {
this.numIterations = numIterations;
}
public void setBurninPeriod (int burninPeriod) {
this.burninPeriod = burninPeriod;
}
public void setTopicDisplay(int interval, int n) {
this.showTopicsInterval = interval;
this.wordsPerTopic = n;
}
public void setRandomSeed(int seed) {
randomSeed = seed;
}
/** Interval for optimizing Dirichlet hyperparameters */
public void setOptimizeInterval(int interval) {
this.optimizeInterval = interval;
// Make sure we always have at least one sample
// before optimizing hyperparameters
if (saveSampleInterval > optimizeInterval) {
saveSampleInterval = optimizeInterval;
}
}
public void setSymmetricAlpha(boolean b) {
usingSymmetricAlpha = b;
}
public void setTemperingInterval(int interval) {
temperingInterval = interval;
}
public void setNumThreads(int threads) {
this.numThreads = threads;
}
/** Define how often and where to save a text representation of the current state.
* Files are GZipped.
*
* @param interval Save a copy of the state every <code>interval</code> iterations.
* @param filename Save the state to this file, with the iteration number as a suffix
*/
public void setSaveState(int interval, String filename) {
this.saveStateInterval = interval;
this.stateFilename = filename;
}
/** Define how often and where to save a serialized model.
*
* @param interval Save a serialized model every <code>interval</code> iterations.
* @param filename Save to this file, with the iteration number as a suffix
*/
public void setSaveSerializedModel(int interval, String filename) {
this.saveModelInterval = interval;
this.modelFilename = filename;
}
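	/**
	 *  Load a set of training documents. Each instance's data should be a
	 *  <code>FeatureSequence</code>. Topic indicators are initialized uniformly at
	 *  random, and each packed type/topic count array is sized to
	 *  min(numTopics, total occurrences of the type), so it can never overflow.
	 */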
public void addInstances (InstanceList training) {
alphabet = training.getDataAlphabet();
numTypes = alphabet.size();
betaSum = beta * numTypes;
typeTopicCounts = new int[numTypes][];
// Get the total number of occurrences of each word type
//int[] typeTotals = new int[numTypes];
typeTotals = new int[numTypes];
int doc = 0;
for (Instance instance : training) {
doc++;
FeatureSequence tokens = (FeatureSequence) instance.getData();
for (int position = 0; position < tokens.getLength(); position++) {
int type = tokens.getIndexAtPosition(position);
typeTotals[ type ]++;
}
}
maxTypeCount = 0;
// Allocate enough space so that we never have to worry about
// overflows: either the number of topics or the number of times
// the type occurs.
for (int type = 0; type < numTypes; type++) {
if (typeTotals[type] > maxTypeCount) { maxTypeCount = typeTotals[type]; }
typeTopicCounts[type] = new int[ Math.min(numTopics, typeTotals[type]) ];
}
doc = 0;
Randoms random = null;
if (randomSeed == -1) {
random = new Randoms();
}
else {
random = new Randoms(randomSeed);
}
for (Instance instance : training) {
doc++;
FeatureSequence tokens = (FeatureSequence) instance.getData();
LabelSequence topicSequence =
new LabelSequence(topicAlphabet, new int[ tokens.size() ]);
int[] topics = topicSequence.getFeatures();
for (int position = 0; position < topics.length; position++) {
int topic = random.nextInt(numTopics);
topics[position] = topic;
}
TopicAssignment t = new TopicAssignment (instance, topicSequence);
data.add (t);
}
buildInitialTypeTopicCounts();
initializeHistograms();
}
public void initializeFromState(File stateFile) throws IOException {
String line;
String[] fields;
BufferedReader reader = new BufferedReader(new InputStreamReader(new GZIPInputStream(new FileInputStream(stateFile))));
line = reader.readLine();
// Skip some lines starting with "#" that describe the format and specify hyperparameters
while (line.startsWith("#")) {
line = reader.readLine();
}
fields = line.split(" ");
for (TopicAssignment document: data) {
FeatureSequence tokens = (FeatureSequence) document.instance.getData();
FeatureSequence topicSequence = (FeatureSequence) document.topicSequence;
int[] topics = topicSequence.getFeatures();
for (int position = 0; position < tokens.size(); position++) {
int type = tokens.getIndexAtPosition(position);
if (type == Integer.parseInt(fields[3])) {
topics[position] = Integer.parseInt(fields[5]);
}
else {
System.err.println("instance list and state do not match: " + line);
throw new IllegalStateException();
}
line = reader.readLine();
if (line != null) {
fields = line.split(" ");
}
}
}
buildInitialTypeTopicCounts();
initializeHistograms();
}
public void buildInitialTypeTopicCounts () {
// Clear the topic totals
Arrays.fill(tokensPerTopic, 0);
// Clear the type/topic counts, only
// looking at the entries before the first 0 entry.
for (int type = 0; type < numTypes; type++) {
int[] topicCounts = typeTopicCounts[type];
int position = 0;
while (position < topicCounts.length &&
topicCounts[position] > 0) {
topicCounts[position] = 0;
position++;
}
}
for (TopicAssignment document : data) {
FeatureSequence tokens = (FeatureSequence) document.instance.getData();
FeatureSequence topicSequence = (FeatureSequence) document.topicSequence;
int[] topics = topicSequence.getFeatures();
for (int position = 0; position < tokens.size(); position++) {
int topic = topics[position];
tokensPerTopic[topic]++;
// The format for these arrays is
// the topic in the rightmost bits
// the count in the remaining (left) bits.
// Since the count is in the high bits, sorting (desc)
// by the numeric value of the int guarantees that
// higher counts will be before the lower counts.
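				// For example, assuming topicBits = 7: count 4 / topic 9 encodes to
				//  (4 << 7) + 9 = 521, which sorts above count 3 / topic 2,
				//  (3 << 7) + 2 = 386, whatever the topic values are.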
int type = tokens.getIndexAtPosition(position);
int[] currentTypeTopicCounts = typeTopicCounts[ type ];
// Start by assuming that the array is either empty
// or is in sorted (descending) order.
// Here we are only adding counts, so if we find
// an existing location with the topic, we only need
// to ensure that it is not larger than its left neighbor.
int index = 0;
int currentTopic = currentTypeTopicCounts[index] & topicMask;
int currentValue;
while (currentTypeTopicCounts[index] > 0 && currentTopic != topic) {
index++;
if (index == currentTypeTopicCounts.length) {
logger.info("overflow on type " + type);
}
currentTopic = currentTypeTopicCounts[index] & topicMask;
}
currentValue = currentTypeTopicCounts[index] >> topicBits;
if (currentValue == 0) {
// new value is 1, so we don't have to worry about sorting
// (except by topic suffix, which doesn't matter)
currentTypeTopicCounts[index] =
(1 << topicBits) + topic;
}
else {
currentTypeTopicCounts[index] =
((currentValue + 1) << topicBits) + topic;
// Now ensure that the array is still sorted by
// bubbling this value up.
while (index > 0 &&
currentTypeTopicCounts[index] > currentTypeTopicCounts[index - 1]) {
int temp = currentTypeTopicCounts[index];
currentTypeTopicCounts[index] = currentTypeTopicCounts[index - 1];
currentTypeTopicCounts[index - 1] = temp;
index--;
}
}
}
}
}
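	/**
	 *  Merge the per-thread topic totals and type/topic counts computed by the
	 *  worker runnables back into the global <code>tokensPerTopic</code> and
	 *  <code>typeTopicCounts</code> arrays, preserving the descending-by-count
	 *  order of the packed count/topic pairs.
	 */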
public void sumTypeTopicCounts (WorkerRunnable[] runnables) {
// Clear the topic totals
Arrays.fill(tokensPerTopic, 0);
// Clear the type/topic counts, only
// looking at the entries before the first 0 entry.
for (int type = 0; type < numTypes; type++) {
int[] targetCounts = typeTopicCounts[type];
int position = 0;
while (position < targetCounts.length &&
targetCounts[position] > 0) {
targetCounts[position] = 0;
position++;
}
}
for (int thread = 0; thread < numThreads; thread++) {
// Handle the total-tokens-per-topic array
int[] sourceTotals = runnables[thread].getTokensPerTopic();
for (int topic = 0; topic < numTopics; topic++) {
tokensPerTopic[topic] += sourceTotals[topic];
}
// Now handle the individual type topic counts
int[][] sourceTypeTopicCounts =
runnables[thread].getTypeTopicCounts();
for (int type = 0; type < numTypes; type++) {
// Here the source is the individual thread counts,
// and the target is the global counts.
int[] sourceCounts = sourceTypeTopicCounts[type];
int[] targetCounts = typeTopicCounts[type];
int sourceIndex = 0;
while (sourceIndex < sourceCounts.length &&
sourceCounts[sourceIndex] > 0) {
int topic = sourceCounts[sourceIndex] & topicMask;
int count = sourceCounts[sourceIndex] >> topicBits;
int targetIndex = 0;
int currentTopic = targetCounts[targetIndex] & topicMask;
int currentCount;
while (targetCounts[targetIndex] > 0 && currentTopic != topic) {
targetIndex++;
if (targetIndex == targetCounts.length) {
logger.info("overflow in merging on type " + type);
}
currentTopic = targetCounts[targetIndex] & topicMask;
}
currentCount = targetCounts[targetIndex] >> topicBits;
targetCounts[targetIndex] =
((currentCount + count) << topicBits) + topic;
// Now ensure that the array is still sorted by
// bubbling this value up.
while (targetIndex > 0 &&
targetCounts[targetIndex] > targetCounts[targetIndex - 1]) {
int temp = targetCounts[targetIndex];
targetCounts[targetIndex] = targetCounts[targetIndex - 1];
targetCounts[targetIndex - 1] = temp;
targetIndex--;
}
sourceIndex++;
}
}
}
	/* // Debugging code to ensure counts are being 
// reconstructed correctly.
for (int type = 0; type < numTypes; type++) {
int[] targetCounts = typeTopicCounts[type];
int index = 0;
int count = 0;
while (index < targetCounts.length &&
targetCounts[index] > 0) {
count += targetCounts[index] >> topicBits;
index++;
}
if (count != typeTotals[type]) {
System.err.println("Expected " + typeTotals[type] + ", found " + count);
}
}
*/
}
/**
* Gather statistics on the size of documents
* and create histograms for use in Dirichlet hyperparameter
* optimization.
*/
private void initializeHistograms() {
int maxTokens = 0;
totalTokens = 0;
int seqLen;
for (int doc = 0; doc < data.size(); doc++) {
FeatureSequence fs = (FeatureSequence) data.get(doc).instance.getData();
seqLen = fs.getLength();
if (seqLen > maxTokens)
maxTokens = seqLen;
totalTokens += seqLen;
}
logger.info("max tokens: " + maxTokens);
logger.info("total tokens: " + totalTokens);
docLengthCounts = new int[maxTokens + 1];
topicDocCounts = new int[numTopics][maxTokens + 1];
}
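	/**
	 *  Collect the document-length and document/topic count histograms from each
	 *  worker and re-estimate the Dirichlet alpha hyperparameters: a single
	 *  concentration parameter if <code>usingSymmetricAlpha</code> is set,
	 *  otherwise an asymmetric alpha vector via <code>Dirichlet.learnParameters</code>.
	 */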
public void optimizeAlpha(WorkerRunnable[] runnables) {
// First clear the sufficient statistic histograms
Arrays.fill(docLengthCounts, 0);
for (int topic = 0; topic < topicDocCounts.length; topic++) {
Arrays.fill(topicDocCounts[topic], 0);
}
for (int thread = 0; thread < numThreads; thread++) {
int[] sourceLengthCounts = runnables[thread].getDocLengthCounts();
int[][] sourceTopicCounts = runnables[thread].getTopicDocCounts();
for (int count=0; count < sourceLengthCounts.length; count++) {
if (sourceLengthCounts[count] > 0) {
docLengthCounts[count] += sourceLengthCounts[count];
sourceLengthCounts[count] = 0;
}
}
for (int topic=0; topic < numTopics; topic++) {
if (! usingSymmetricAlpha) {
for (int count=0; count < sourceTopicCounts[topic].length; count++) {
if (sourceTopicCounts[topic][count] > 0) {
topicDocCounts[topic][count] += sourceTopicCounts[topic][count];
sourceTopicCounts[topic][count] = 0;
}
}
}
else {
// For the symmetric version, we only need one
// count array, which I'm putting in the same
// data structure, but for topic 0. All other
// topic histograms will be empty.
// I'm duplicating this for loop, which
// isn't the best thing, but it means only checking
// whether we are symmetric or not numTopics times,
// instead of numTopics * longest document length.
for (int count=0; count < sourceTopicCounts[topic].length; count++) {
if (sourceTopicCounts[topic][count] > 0) {
topicDocCounts[0][count] += sourceTopicCounts[topic][count];
// ^ the only change
sourceTopicCounts[topic][count] = 0;
}
}
}
}
}
if (usingSymmetricAlpha) {
alphaSum = Dirichlet.learnSymmetricConcentration(topicDocCounts[0],
docLengthCounts,
numTopics,
alphaSum);
for (int topic = 0; topic < numTopics; topic++) {
alpha[topic] = alphaSum / numTopics;
}
}
else {
alphaSum = Dirichlet.learnParameters(alpha, topicDocCounts, docLengthCounts, 1.001, 1.0, 1);
}
}
public void temperAlpha(WorkerRunnable[] runnables) {
// First clear the sufficient statistic histograms
Arrays.fill(docLengthCounts, 0);
for (int topic = 0; topic < topicDocCounts.length; topic++) {
Arrays.fill(topicDocCounts[topic], 0);
}
for (int thread = 0; thread < numThreads; thread++) {
int[] sourceLengthCounts = runnables[thread].getDocLengthCounts();
int[][] sourceTopicCounts = runnables[thread].getTopicDocCounts();
for (int count=0; count < sourceLengthCounts.length; count++) {
if (sourceLengthCounts[count] > 0) {
sourceLengthCounts[count] = 0;
}
}
for (int topic=0; topic < numTopics; topic++) {
for (int count=0; count < sourceTopicCounts[topic].length; count++) {
if (sourceTopicCounts[topic][count] > 0) {
sourceTopicCounts[topic][count] = 0;
}
}
}
}
for (int topic = 0; topic < numTopics; topic++) {
alpha[topic] = 1.0;
}
alphaSum = numTopics;
}
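	/**
	 *  Re-estimate the symmetric beta hyperparameter from the histogram of
	 *  type/topic counts and the histogram of topic sizes using
	 *  <code>Dirichlet.learnSymmetricConcentration</code>, then publish the new
	 *  value to every worker.
	 */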
public void optimizeBeta(WorkerRunnable[] runnables) {
// The histogram starts at count 0, so if all of the
// tokens of the most frequent type were assigned to one topic,
// we would need to store a maxTypeCount + 1 count.
int[] countHistogram = new int[maxTypeCount + 1];
// Now count the number of type/topic pairs that have
// each number of tokens.
int index;
for (int type = 0; type < numTypes; type++) {
int[] counts = typeTopicCounts[type];
index = 0;
while (index < counts.length &&
counts[index] > 0) {
int count = counts[index] >> topicBits;
countHistogram[count]++;
index++;
}
}
// Figure out how large we need to make the "observation lengths"
// histogram.
int maxTopicSize = 0;
for (int topic = 0; topic < numTopics; topic++) {
if (tokensPerTopic[topic] > maxTopicSize) {
maxTopicSize = tokensPerTopic[topic];
}
}
// Now allocate it and populate it.
int[] topicSizeHistogram = new int[maxTopicSize + 1];
for (int topic = 0; topic < numTopics; topic++) {
topicSizeHistogram[ tokensPerTopic[topic] ]++;
}
betaSum = Dirichlet.learnSymmetricConcentration(countHistogram,
topicSizeHistogram,
numTypes,
betaSum);
beta = betaSum / numTypes;
logger.info("[beta: " + formatter.format(beta) + "] ");
// Now publish the new value
for (int thread = 0; thread < numThreads; thread++) {
runnables[thread].resetBeta(beta, betaSum);
}
}
public void estimate () throws IOException {
long startTime = System.currentTimeMillis();
WorkerRunnable[] runnables = new WorkerRunnable[numThreads];
int docsPerThread = data.size() / numThreads;
int offset = 0;
if (numThreads > 1) {
for (int thread = 0; thread < numThreads; thread++) {
int[] runnableTotals = new int[numTopics];
System.arraycopy(tokensPerTopic, 0, runnableTotals, 0, numTopics);
int[][] runnableCounts = new int[numTypes][];
for (int type = 0; type < numTypes; type++) {
int[] counts = new int[typeTopicCounts[type].length];
System.arraycopy(typeTopicCounts[type], 0, counts, 0, counts.length);
runnableCounts[type] = counts;
}
// some docs may be missing at the end due to integer division
if (thread == numThreads - 1) {
docsPerThread = data.size() - offset;
}
Randoms random = null;
if (randomSeed == -1) {
random = new Randoms();
}
else {
random = new Randoms(randomSeed);
}
runnables[thread] = new WorkerRunnable(numTopics,
alpha, alphaSum, beta,
random, data,
runnableCounts, runnableTotals,
offset, docsPerThread);
runnables[thread].initializeAlphaStatistics(docLengthCounts.length);
offset += docsPerThread;
}
}
else {
// If there is only one thread, copy the typeTopicCounts
// arrays directly, rather than allocating new memory.
Randoms random = null;
if (randomSeed == -1) {
random = new Randoms();
}
else {
random = new Randoms(randomSeed);
}
runnables[0] = new WorkerRunnable(numTopics,
alpha, alphaSum, beta,
random, data,
typeTopicCounts, tokensPerTopic,
offset, docsPerThread);
runnables[0].initializeAlphaStatistics(docLengthCounts.length);
// If there is only one thread, we
// can avoid communications overhead.
// This switch informs the thread not to
// gather statistics for its portion of the data.
runnables[0].makeOnlyThread();
}
ExecutorService executor = Executors.newFixedThreadPool(numThreads);
for (int iteration = 1; iteration <= numIterations; iteration++) {
long iterationStart = System.currentTimeMillis();
if (showTopicsInterval != 0 && iteration != 0 && iteration % showTopicsInterval == 0) {
logger.info("\n" + displayTopWords (wordsPerTopic, false));
}
if (saveStateInterval != 0 && iteration % saveStateInterval == 0) {
this.printState(new File(stateFilename + '.' + iteration));
}
if (saveModelInterval != 0 && iteration % saveModelInterval == 0) {
this.write(new File(modelFilename + '.' + iteration));
}
if (numThreads > 1) {
// Submit runnables to thread pool
for (int thread = 0; thread < numThreads; thread++) {
if (iteration > burninPeriod && optimizeInterval != 0 &&
iteration % saveSampleInterval == 0) {
runnables[thread].collectAlphaStatistics();
}
logger.fine("submitting thread " + thread);
executor.submit(runnables[thread]);
//runnables[thread].run();
}
// I'm getting some problems that look like
// a thread hasn't started yet when it is first
// polled, so it appears to be finished.
// This only occurs in very short corpora.
try {
Thread.sleep(20);
} catch (InterruptedException e) {
}
boolean finished = false;
while (! finished) {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
}
finished = true;
// Are all the threads done?
for (int thread = 0; thread < numThreads; thread++) {
//logger.info("thread " + thread + " done? " + runnables[thread].isFinished);
finished = finished && runnables[thread].isFinished;
}
}
//System.out.print("[" + (System.currentTimeMillis() - iterationStart) + "] ");
sumTypeTopicCounts(runnables);
//System.out.print("[" + (System.currentTimeMillis() - iterationStart) + "] ");
for (int thread = 0; thread < numThreads; thread++) {
int[] runnableTotals = runnables[thread].getTokensPerTopic();
System.arraycopy(tokensPerTopic, 0, runnableTotals, 0, numTopics);
int[][] runnableCounts = runnables[thread].getTypeTopicCounts();
for (int type = 0; type < numTypes; type++) {
int[] targetCounts = runnableCounts[type];
int[] sourceCounts = typeTopicCounts[type];
int index = 0;
while (index < sourceCounts.length) {
if (sourceCounts[index] != 0) {
targetCounts[index] = sourceCounts[index];
}
else if (targetCounts[index] != 0) {
targetCounts[index] = 0;
}
else {
break;
}
index++;
}
//System.arraycopy(typeTopicCounts[type], 0, counts, 0, counts.length);
}
}
}
else {
if (iteration > burninPeriod && optimizeInterval != 0 &&
iteration % saveSampleInterval == 0) {
runnables[0].collectAlphaStatistics();
}
runnables[0].run();
}
long elapsedMillis = System.currentTimeMillis() - iterationStart;
if (elapsedMillis < 1000) {
logger.fine(elapsedMillis + "ms ");
}
else {
logger.fine((elapsedMillis/1000) + "s ");
}
if (iteration > burninPeriod && optimizeInterval != 0 &&
iteration % optimizeInterval == 0) {
optimizeAlpha(runnables);
optimizeBeta(runnables);
logger.fine("[O " + (System.currentTimeMillis() - iterationStart) + "] ");
}
if (iteration % 10 == 0) {
if (printLogLikelihood) {
logger.info ("<" + iteration + "> LL/token: " + formatter.format(modelLogLikelihood() / totalTokens));
}
else {
logger.info ("<" + iteration + ">");
}
}
}
executor.shutdownNow();
long seconds = Math.round((System.currentTimeMillis() - startTime)/1000.0);
long minutes = seconds / 60; seconds %= 60;
long hours = minutes / 60; minutes %= 60;
long days = hours / 24; hours %= 24;
StringBuilder timeReport = new StringBuilder();
timeReport.append("\nTotal time: ");
if (days != 0) { timeReport.append(days); timeReport.append(" days "); }
if (hours != 0) { timeReport.append(hours); timeReport.append(" hours "); }
if (minutes != 0) { timeReport.append(minutes); timeReport.append(" minutes "); }
timeReport.append(seconds); timeReport.append(" seconds");
logger.info(timeReport.toString());
}
public void printTopWords (File file, int numWords, boolean useNewLines) throws IOException {
PrintStream out = new PrintStream (file);
printTopWords(out, numWords, useNewLines);
out.close();
}
/**
* Return an array of sorted sets (one set per topic). Each set
* contains IDSorter objects with integer keys into the alphabet.
* To get direct access to the Strings, use getTopWords().
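	 *
	 *  A minimal usage sketch (variable names are illustrative only):
	 *  <pre>
	 *  TreeSet[] topicSortedWords = model.getSortedWords();
	 *  for (Object o : topicSortedWords[0]) {
	 *      IDSorter sorter = (IDSorter) o;
	 *      System.out.println(model.getAlphabet().lookupObject(sorter.getID())
	 *                         + "\t" + sorter.getWeight());
	 *  }
	 *  </pre>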
*/
public TreeSet[] getSortedWords () {
TreeSet[] topicSortedWords = new TreeSet[ numTopics ];
// Initialize the tree sets
for (int topic = 0; topic < numTopics; topic++) {
topicSortedWords[topic] = new TreeSet<IDSorter>();
}
// Collect counts
for (int type = 0; type < numTypes; type++) {
int[] topicCounts = typeTopicCounts[type];
int index = 0;
while (index < topicCounts.length &&
topicCounts[index] > 0) {
int topic = topicCounts[index] & topicMask;
int count = topicCounts[index] >> topicBits;
topicSortedWords[topic].add(new IDSorter(type, count));
index++;
}
}
return topicSortedWords;
}
/** Return an array (one element for each topic) of arrays of words, which
* are the most probable words for that topic in descending order. These
* are returned as Objects, but will probably be Strings.
*
* @param numWords The maximum length of each topic's array of words (may be less).
*/
public Object[][] getTopWords(int numWords) {
TreeSet[] topicSortedWords = getSortedWords();
Object[][] result = new Object[ numTopics ][];
for (int topic = 0; topic < numTopics; topic++) {
TreeSet<IDSorter> sortedWords = topicSortedWords[topic];
// How many words should we report? Some topics may have fewer than
// the default number of words with non-zero weight.
int limit = numWords;
if (sortedWords.size() < numWords) { limit = sortedWords.size(); }
result[topic] = new Object[limit];
Iterator<IDSorter> iterator = sortedWords.iterator();
for (int i=0; i < limit; i++) {
IDSorter info = iterator.next();
result[topic][i] = alphabet.lookupObject(info.getID());
}
}
return result;
}
public void printTopWords (PrintStream out, int numWords, boolean usingNewLines) {
out.print(displayTopWords(numWords, usingNewLines));
}
public String displayTopWords (int numWords, boolean usingNewLines) {
StringBuilder out = new StringBuilder();
TreeSet[] topicSortedWords = getSortedWords();
// Print results for each topic
for (int topic = 0; topic < numTopics; topic++) {
TreeSet<IDSorter> sortedWords = topicSortedWords[topic];
int word = 1;
Iterator<IDSorter> iterator = sortedWords.iterator();
if (usingNewLines) {
out.append (topic + "\t" + formatter.format(alpha[topic]) + "\n");
while (iterator.hasNext() && word < numWords) {
IDSorter info = iterator.next();
out.append(alphabet.lookupObject(info.getID()) + "\t" + formatter.format(info.getWeight()) + "\n");
word++;
}
}
else {
out.append (topic + "\t" + formatter.format(alpha[topic]) + "\t");
while (iterator.hasNext() && word < numWords) {
IDSorter info = iterator.next();
out.append(alphabet.lookupObject(info.getID()) + " ");
word++;
}
out.append ("\n");
}
}
return out.toString();
}
public void topicXMLReport (PrintWriter out, int numWords) {
TreeSet[] topicSortedWords = getSortedWords();
out.println("<?xml version='1.0' ?>");
out.println("<topicModel>");
for (int topic = 0; topic < numTopics; topic++) {
out.println(" <topic id='" + topic + "' alpha='" + alpha[topic] +
"' totalTokens='" + tokensPerTopic[topic] + "'>");
int word = 1;
Iterator<IDSorter> iterator = topicSortedWords[topic].iterator();
while (iterator.hasNext() && word < numWords) {
IDSorter info = iterator.next();
out.println(" <word rank='" + word + "'>" +
alphabet.lookupObject(info.getID()) +
"</word>");
word++;
}
out.println(" </topic>");
}
out.println("</topicModel>");
}
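	/**
	 *  Write an XML report listing the top words in each topic, multi-word
	 *  phrases built from adjacent tokens assigned to the same topic, and a
	 *  heuristic comma-separated list of candidate topic titles.
	 */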
public void topicPhraseXMLReport(PrintWriter out, int numWords) {
int numTopics = this.getNumTopics();
gnu.trove.TObjectIntHashMap<String>[] phrases = new gnu.trove.TObjectIntHashMap[numTopics];
Alphabet alphabet = this.getAlphabet();
// Get counts of phrases
for (int ti = 0; ti < numTopics; ti++)
phrases[ti] = new gnu.trove.TObjectIntHashMap<String>();
for (int di = 0; di < this.getData().size(); di++) {
TopicAssignment t = this.getData().get(di);
Instance instance = t.instance;
FeatureSequence fvs = (FeatureSequence) instance.getData();
boolean withBigrams = false;
if (fvs instanceof FeatureSequenceWithBigrams) withBigrams = true;
int prevtopic = -1;
int prevfeature = -1;
int topic = -1;
StringBuffer sb = null;
int feature = -1;
int doclen = fvs.size();
for (int pi = 0; pi < doclen; pi++) {
feature = fvs.getIndexAtPosition(pi);
topic = this.getData().get(di).topicSequence.getIndexAtPosition(pi);
if (topic == prevtopic && (!withBigrams || ((FeatureSequenceWithBigrams)fvs).getBiIndexAtPosition(pi) != -1)) {
if (sb == null)
sb = new StringBuffer (alphabet.lookupObject(prevfeature).toString() + " " + alphabet.lookupObject(feature));
else {
sb.append (" ");
sb.append (alphabet.lookupObject(feature));
}
} else if (sb != null) {
String sbs = sb.toString();
//logger.info ("phrase:"+sbs);
if (phrases[prevtopic].get(sbs) == 0)
phrases[prevtopic].put(sbs,0);
phrases[prevtopic].increment(sbs);
prevtopic = prevfeature = -1;
sb = null;
} else {
prevtopic = topic;
prevfeature = feature;
}
}
}
// phrases[] now filled with counts
// Now start printing the XML
out.println("<?xml version='1.0' ?>");
out.println("<topics>");
TreeSet[] topicSortedWords = getSortedWords();
double[] probs = new double[alphabet.size()];
for (int ti = 0; ti < numTopics; ti++) {
out.print(" <topic id=\"" + ti + "\" alpha=\"" + alpha[ti] +
"\" totalTokens=\"" + tokensPerTopic[ti] + "\" ");
// For gathering <term> and <phrase> output temporarily
// so that we can get topic-title information before printing it to "out".
ByteArrayOutputStream bout = new ByteArrayOutputStream();
PrintStream pout = new PrintStream (bout);
// For holding candidate topic titles
AugmentableFeatureVector titles = new AugmentableFeatureVector (new Alphabet());
// Print words
int word = 1;
Iterator<IDSorter> iterator = topicSortedWords[ti].iterator();
while (iterator.hasNext() && word < numWords) {
IDSorter info = iterator.next();
pout.println(" <word weight=\""+(info.getWeight()/tokensPerTopic[ti])+"\" count=\""+Math.round(info.getWeight())+"\">"
+ alphabet.lookupObject(info.getID()) +
"</word>");
word++;
if (word < 20) // consider top 20 individual words as candidate titles
titles.add(alphabet.lookupObject(info.getID()), info.getWeight());
}
/*
for (int type = 0; type < alphabet.size(); type++)
probs[type] = this.getCountFeatureTopic(type, ti) / (double)this.getCountTokensPerTopic(ti);
RankedFeatureVector rfv = new RankedFeatureVector (alphabet, probs);
for (int ri = 0; ri < numWords; ri++) {
int fi = rfv.getIndexAtRank(ri);
pout.println (" <term weight=\""+probs[fi]+"\" count=\""+this.getCountFeatureTopic(fi,ti)+"\">"+alphabet.lookupObject(fi)+ "</term>");
if (ri < 20) // consider top 20 individual words as candidate titles
titles.add(alphabet.lookupObject(fi), this.getCountFeatureTopic(fi,ti));
}
*/
// Print phrases
Object[] keys = phrases[ti].keys();
int[] values = phrases[ti].getValues();
double counts[] = new double[keys.length];
for (int i = 0; i < counts.length; i++) counts[i] = values[i];
double countssum = MatrixOps.sum (counts);
Alphabet alph = new Alphabet(keys);
RankedFeatureVector rfv = new RankedFeatureVector (alph, counts);
int max = rfv.numLocations() < numWords ? rfv.numLocations() : numWords;
for (int ri = 0; ri < max; ri++) {
int fi = rfv.getIndexAtRank(ri);
pout.println (" <phrase weight=\""+counts[fi]/countssum+"\" count=\""+values[fi]+"\">"+alph.lookupObject(fi)+ "</phrase>");
// Any phrase count less than 20 is simply unreliable
if (ri < 20 && values[fi] > 20)
titles.add(alph.lookupObject(fi), 100*values[fi]); // prefer phrases with a factor of 100
}
// Select candidate titles
StringBuffer titlesStringBuffer = new StringBuffer();
rfv = new RankedFeatureVector (titles.getAlphabet(), titles);
int numTitles = 10;
for (int ri = 0; ri < numTitles && ri < rfv.numLocations(); ri++) {
// Don't add redundant titles
if (titlesStringBuffer.indexOf(rfv.getObjectAtRank(ri).toString()) == -1) {
titlesStringBuffer.append (rfv.getObjectAtRank(ri));
if (ri < numTitles-1)
titlesStringBuffer.append (", ");
} else
numTitles++;
}
out.println("titles=\"" + titlesStringBuffer.toString() + "\">");
out.print(bout.toString());
out.println(" </topic>");
}
out.println("</topics>");
}
/**
* Write the internal representation of type-topic counts
* (count/topic pairs in descending order by count) to a file.
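	 *  Each output line has the form (illustrative): typeIndex word topic:count topic:count ...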
*/
public void printTypeTopicCounts(File file) throws IOException {
PrintWriter out = new PrintWriter (new FileWriter (file) );
for (int type = 0; type < numTypes; type++) {
StringBuilder buffer = new StringBuilder();
buffer.append(type + " " + alphabet.lookupObject(type));
int[] topicCounts = typeTopicCounts[type];
int index = 0;
while (index < topicCounts.length &&
topicCounts[index] > 0) {
int topic = topicCounts[index] & topicMask;
int count = topicCounts[index] >> topicBits;
buffer.append(" " + topic + ":" + count);
index++;
}
out.println(buffer);
}
out.close();
}
public void printTopicWordWeights(File file) throws IOException {
PrintWriter out = new PrintWriter (new FileWriter (file) );
printTopicWordWeights(out);
out.close();
}
/**
* Print an unnormalized weight for every word in every topic.
* Most of these will be equal to the smoothing parameter beta.
*/
public void printTopicWordWeights(PrintWriter out) throws IOException {
// Probably not the most efficient way to do this...
for (int topic = 0; topic < numTopics; topic++) {
for (int type = 0; type < numTypes; type++) {
int[] topicCounts = typeTopicCounts[type];
double weight = beta;
int index = 0;
while (index < topicCounts.length &&
topicCounts[index] > 0) {
int currentTopic = topicCounts[index] & topicMask;
if (currentTopic == topic) {
weight += topicCounts[index] >> topicBits;
break;
}
index++;
}
out.println(topic + "\t" + alphabet.lookupObject(type) + "\t" + weight);
}
}
}
public void printDocumentTopics (File file) throws IOException {
PrintWriter out = new PrintWriter (new FileWriter (file) );
printDocumentTopics (out);
out.close();
}
public void printDocumentTopics (PrintWriter out) {
printDocumentTopics (out, 0.0, -1);
}
/**
* @param out A print writer
* @param threshold Only print topics with proportion greater than this number
* @param max Print no more than this many topics
*/
public void printDocumentTopics (PrintWriter out, double threshold, int max) {
out.print ("#doc source topic proportion ...\n");
int docLen;
int[] topicCounts = new int[ numTopics ];
IDSorter[] sortedTopics = new IDSorter[ numTopics ];
for (int topic = 0; topic < numTopics; topic++) {
// Initialize the sorters with dummy values
sortedTopics[topic] = new IDSorter(topic, topic);
}
if (max < 0 || max > numTopics) {
max = numTopics;
}
for (int doc = 0; doc < data.size(); doc++) {
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequence;
int[] currentDocTopics = topicSequence.getFeatures();
out.print (doc); out.print (' ');
if (data.get(doc).instance.getSource() != null) {
out.print (data.get(doc).instance.getSource());
}
else {
out.print ("null-source");
}
out.print (' ');
docLen = currentDocTopics.length;
// Count up the tokens
for (int token=0; token < docLen; token++) {
topicCounts[ currentDocTopics[token] ]++;
}
// And normalize
for (int topic = 0; topic < numTopics; topic++) {
sortedTopics[topic].set(topic, (float) topicCounts[topic] / docLen);
}
Arrays.sort(sortedTopics);
for (int i = 0; i < max; i++) {
if (sortedTopics[i].getWeight() < threshold) { break; }
out.print (sortedTopics[i].getID() + " " +
sortedTopics[i].getWeight() + " ");
}
out.print (" \n");
Arrays.fill(topicCounts, 0);
}
}
public void printState (File f) throws IOException {
PrintStream out =
new PrintStream(new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(f))));
printState(out);
out.close();
}
public void printState (PrintStream out) {
out.println ("#doc source pos typeindex type topic");
out.print("#alpha : ");
for (int topic = 0; topic < numTopics; topic++) {
out.print(alpha[topic] + " ");
}
out.println();
out.println("#beta : " + beta);
for (int doc = 0; doc < data.size(); doc++) {
FeatureSequence tokenSequence = (FeatureSequence) data.get(doc).instance.getData();
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequence;
String source = "NA";
if (data.get(doc).instance.getSource() != null) {
source = data.get(doc).instance.getSource().toString();
}
for (int pi = 0; pi < topicSequence.getLength(); pi++) {
int type = tokenSequence.getIndexAtPosition(pi);
int topic = topicSequence.getIndexAtPosition(pi);
out.print(doc); out.print(' ');
out.print(source); out.print(' ');
out.print(pi); out.print(' ');
out.print(type); out.print(' ');
out.print(alphabet.lookupObject(type)); out.print(' ');
out.print(topic); out.println();
}
}
}
public double modelLogLikelihood() {
double logLikelihood = 0.0;
int nonZeroTopics;
// The likelihood of the model is a combination of a
// Dirichlet-multinomial for the words in each topic
// and a Dirichlet-multinomial for the topics in each
// document.
		// The likelihood function of a Dirichlet-multinomial is
		//   [ Gamma( sum_i alpha_i ) / prod_i Gamma( alpha_i ) ] *
		//   [ prod_i Gamma( alpha_i + N_i ) / Gamma( sum_i (alpha_i + N_i) ) ]
		// So the log likelihood is 
		//   logGamma ( sum_i alpha_i ) - logGamma ( sum_i (alpha_i + N_i) ) +
		//   sum_i [ logGamma( alpha_i + N_i) - logGamma( alpha_i ) ]
// Do the documents first
int[] topicCounts = new int[numTopics];
double[] topicLogGammas = new double[numTopics];
int[] docTopics;
for (int topic=0; topic < numTopics; topic++) {
topicLogGammas[ topic ] = Dirichlet.logGammaStirling( alpha[topic] );
}
for (int doc=0; doc < data.size(); doc++) {
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequence;
docTopics = topicSequence.getFeatures();
for (int token=0; token < docTopics.length; token++) {
topicCounts[ docTopics[token] ]++;
}
for (int topic=0; topic < numTopics; topic++) {
if (topicCounts[topic] > 0) {
logLikelihood += (Dirichlet.logGammaStirling(alpha[topic] + topicCounts[topic]) -
topicLogGammas[ topic ]);
}
}
// subtract the (count + parameter) sum term
logLikelihood -= Dirichlet.logGammaStirling(alphaSum + docTopics.length);
Arrays.fill(topicCounts, 0);
}
// add the parameter sum term
logLikelihood += data.size() * Dirichlet.logGammaStirling(alphaSum);
// And the topics
// Count the number of type-topic pairs
int nonZeroTypeTopics = 0;
for (int type=0; type < numTypes; type++) {
// reuse this array as a pointer
topicCounts = typeTopicCounts[type];
int index = 0;
while (index < topicCounts.length &&
topicCounts[index] > 0) {
int topic = topicCounts[index] & topicMask;
int count = topicCounts[index] >> topicBits;
nonZeroTypeTopics++;
logLikelihood += Dirichlet.logGammaStirling(beta + count);
if (Double.isNaN(logLikelihood)) {
System.err.println(count);
System.exit(1);
}
index++;
}
}
for (int topic=0; topic < numTopics; topic++) {
logLikelihood -=
Dirichlet.logGammaStirling( (beta * numTypes) +
tokensPerTopic[ topic ] );
if (Double.isNaN(logLikelihood)) {
logger.info("after topic " + topic + " " + tokensPerTopic[ topic ]);
System.exit(1);
}
}
logLikelihood +=
(Dirichlet.logGammaStirling(beta * numTypes)) -
(Dirichlet.logGammaStirling(beta) * nonZeroTypeTopics);
if (Double.isNaN(logLikelihood)) {
logger.info("at the end");
System.exit(1);
}
return logLikelihood;
}
/** Return a tool for estimating topic distributions for new documents */
public TopicInferencer getInferencer() {
return new TopicInferencer(typeTopicCounts, tokensPerTopic,
data.get(0).instance.getDataAlphabet(),
alpha, beta, betaSum);
}
/** Return a tool for evaluating the marginal probability of new documents
* under this model */
public MarginalProbEstimator getProbEstimator() {
return new MarginalProbEstimator(numTopics, alpha, alphaSum, beta,
typeTopicCounts, tokensPerTopic);
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject(data);
out.writeObject(alphabet);
out.writeObject(topicAlphabet);
out.writeInt(numTopics);
out.writeInt(topicMask);
out.writeInt(topicBits);
out.writeInt(numTypes);
out.writeObject(alpha);
out.writeDouble(alphaSum);
out.writeDouble(beta);
out.writeDouble(betaSum);
out.writeObject(typeTopicCounts);
out.writeObject(tokensPerTopic);
out.writeObject(docLengthCounts);
out.writeObject(topicDocCounts);
out.writeInt(numIterations);
out.writeInt(burninPeriod);
out.writeInt(saveSampleInterval);
out.writeInt(optimizeInterval);
out.writeInt(showTopicsInterval);
out.writeInt(wordsPerTopic);
out.writeInt(saveStateInterval);
out.writeObject(stateFilename);
out.writeInt(saveModelInterval);
out.writeObject(modelFilename);
out.writeInt(randomSeed);
out.writeObject(formatter);
out.writeBoolean(printLogLikelihood);
out.writeInt(numThreads);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
data = (ArrayList<TopicAssignment>) in.readObject ();
alphabet = (Alphabet) in.readObject();
topicAlphabet = (LabelAlphabet) in.readObject();
numTopics = in.readInt();
topicMask = in.readInt();
topicBits = in.readInt();
numTypes = in.readInt();
alpha = (double[]) in.readObject();
alphaSum = in.readDouble();
beta = in.readDouble();
betaSum = in.readDouble();
typeTopicCounts = (int[][]) in.readObject();
tokensPerTopic = (int[]) in.readObject();
docLengthCounts = (int[]) in.readObject();
topicDocCounts = (int[][]) in.readObject();
numIterations = in.readInt();
burninPeriod = in.readInt();
saveSampleInterval = in.readInt();
optimizeInterval = in.readInt();
showTopicsInterval = in.readInt();
wordsPerTopic = in.readInt();
saveStateInterval = in.readInt();
stateFilename = (String) in.readObject();
saveModelInterval = in.readInt();
modelFilename = (String) in.readObject();
randomSeed = in.readInt();
formatter = (NumberFormat) in.readObject();
printLogLikelihood = in.readBoolean();
numThreads = in.readInt();
}
public void write (File serializedModelFile) {
try {
ObjectOutputStream oos = new ObjectOutputStream (new FileOutputStream(serializedModelFile));
oos.writeObject(this);
oos.close();
} catch (IOException e) {
System.err.println("Problem serializing ParallelTopicModel to file " +
serializedModelFile + ": " + e);
}
}
public static ParallelTopicModel read (File f) throws Exception {
ParallelTopicModel topicModel = null;
ObjectInputStream ois = new ObjectInputStream (new FileInputStream(f));
topicModel = (ParallelTopicModel) ois.readObject();
ois.close();
return topicModel;
}
public static void main (String[] args) {
try {
InstanceList training = InstanceList.load (new File(args[0]));
int numTopics = args.length > 1 ? Integer.parseInt(args[1]) : 200;
ParallelTopicModel lda = new ParallelTopicModel (numTopics, 50.0, 0.01);
lda.printLogLikelihood = true;
lda.setTopicDisplay(50, 7);
lda.addInstances(training);
lda.setNumThreads(Integer.parseInt(args[2]));
lda.estimate();
logger.info("printing state");
lda.printState(new File("state.gz"));
logger.info("finished printing");
} catch (Exception e) {
e.printStackTrace();
}
}
}
| 48,315 | 28.714637 | 143 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/PolylingualTopicModel.java | /* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.topics;
import java.util.*;
import java.util.zip.*;
import java.io.*;
import java.text.NumberFormat;
import cc.mallet.types.*;
import cc.mallet.util.Randoms;
/**
 * Latent Dirichlet Allocation for loosely parallel corpora in arbitrary languages
 *  (polylingual topic models; see Mimno, Wallach, Naradowsky, Smith and McCallum,
 *  "Polylingual Topic Models", EMNLP 2009).
*
* @author David Mimno, Andrew McCallum
*/
public class PolylingualTopicModel implements Serializable {
// Analogous to a cc.mallet.classify.Classification
public class TopicAssignment implements Serializable {
public Instance[] instances;
public LabelSequence[] topicSequences;
public Labeling topicDistribution;
public TopicAssignment (Instance[] instances, LabelSequence[] topicSequences) {
this.instances = instances;
this.topicSequences = topicSequences;
}
}
int numLanguages = 1;
protected ArrayList<TopicAssignment> data; // the training instances and their topic assignments
protected LabelAlphabet topicAlphabet; // the alphabet for the topics
protected int numStopwords = 0;
protected int numTopics; // Number of topics to be fit
HashSet<String> testingIDs = null;
// These values are used to encode type/topic counts as
// count/topic pairs in a single int.
protected int topicMask;
protected int topicBits;
protected Alphabet[] alphabets;
protected int[] vocabularySizes;
protected double[] alpha; // Dirichlet(alpha,alpha,...) is the distribution over topics
protected double alphaSum;
protected double[] betas; // Prior on per-topic multinomial distribution over words
protected double[] betaSums;
protected int[] languageMaxTypeCounts;
public static final double DEFAULT_BETA = 0.01;
protected double[] languageSmoothingOnlyMasses;
protected double[][] languageCachedCoefficients;
int topicTermCount = 0;
int betaTopicCount = 0;
int smoothingOnlyCount = 0;
// An array to put the topic counts for the current document.
// Initialized locally below. Defined here to avoid
// garbage collection overhead.
	protected int[] oneDocTopicCounts; // indexed by <topic index>
	protected int[][][] languageTypeTopicCounts; // indexed by <language index, feature index, topic index>
	protected int[][] languageTokensPerTopic; // indexed by <language index, topic index>
// for dirichlet estimation
protected int[] docLengthCounts; // histogram of document sizes, summed over languages
protected int[][] topicDocCounts; // histogram of document/topic counts, indexed by <topic index, sequence position index>
protected int iterationsSoFar = 1;
public int numIterations = 1000;
public int burninPeriod = 5;
public int saveSampleInterval = 5; // was 10;
public int optimizeInterval = 10;
public int showTopicsInterval = 10; // was 50;
public int wordsPerTopic = 7;
protected int outputModelInterval = 0;
protected String outputModelFilename;
protected int saveStateInterval = 0;
protected String stateFilename = null;
protected Randoms random;
protected NumberFormat formatter;
protected boolean printLogLikelihood = false;
public PolylingualTopicModel (int numberOfTopics) {
this (numberOfTopics, numberOfTopics);
}
public PolylingualTopicModel (int numberOfTopics, double alphaSum) {
this (numberOfTopics, alphaSum, new Randoms());
}
private static LabelAlphabet newLabelAlphabet (int numTopics) {
LabelAlphabet ret = new LabelAlphabet();
for (int i = 0; i < numTopics; i++)
ret.lookupIndex("topic"+i);
return ret;
}
public PolylingualTopicModel (int numberOfTopics, double alphaSum, Randoms random) {
this (newLabelAlphabet (numberOfTopics), alphaSum, random);
}
public PolylingualTopicModel (LabelAlphabet topicAlphabet, double alphaSum, Randoms random)
{
this.data = new ArrayList<TopicAssignment>();
this.topicAlphabet = topicAlphabet;
this.numTopics = topicAlphabet.size();
if (Integer.bitCount(numTopics) == 1) {
// exact power of 2
topicMask = numTopics - 1;
topicBits = Integer.bitCount(topicMask);
}
else {
// otherwise add an extra bit
topicMask = Integer.highestOneBit(numTopics) * 2 - 1;
topicBits = Integer.bitCount(topicMask);
}
this.alphaSum = alphaSum;
this.alpha = new double[numTopics];
Arrays.fill(alpha, alphaSum / numTopics);
this.random = random;
formatter = NumberFormat.getInstance();
formatter.setMaximumFractionDigits(5);
System.err.println("Polylingual LDA: " + numTopics + " topics, " + topicBits + " topic bits, " +
Integer.toBinaryString(topicMask) + " topic mask");
}
public void loadTestingIDs(File testingIDFile) throws IOException {
		testingIDs = new HashSet<String>();
BufferedReader in = new BufferedReader(new FileReader(testingIDFile));
String id = null;
while ((id = in.readLine()) != null) {
testingIDs.add(id);
}
in.close();
}
public LabelAlphabet getTopicAlphabet() { return topicAlphabet; }
public int getNumTopics() { return numTopics; }
public ArrayList<TopicAssignment> getData() { return data; }
public void setNumIterations (int numIterations) {
this.numIterations = numIterations;
}
public void setBurninPeriod (int burninPeriod) {
this.burninPeriod = burninPeriod;
}
public void setTopicDisplay(int interval, int n) {
this.showTopicsInterval = interval;
this.wordsPerTopic = n;
}
public void setRandomSeed(int seed) {
random = new Randoms(seed);
}
public void setOptimizeInterval(int interval) {
this.optimizeInterval = interval;
}
public void setModelOutput(int interval, String filename) {
this.outputModelInterval = interval;
this.outputModelFilename = filename;
}
/** Define how often and where to save the state
*
* @param interval Save a copy of the state every <code>interval</code> iterations.
* @param filename Save the state to this file, with the iteration number as a suffix
*/
public void setSaveState(int interval, String filename) {
this.saveStateInterval = interval;
this.stateFilename = filename;
}
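	/**
	 *  Load aligned training data: one <code>InstanceList</code> per language,
	 *  with instances aligned by position across the lists. Documents whose names
	 *  appear in the optional testing-ID set are skipped. Topic indicators are
	 *  initialized uniformly at random.
	 */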
public void addInstances (InstanceList[] training) {
numLanguages = training.length;
languageTokensPerTopic = new int[numLanguages][numTopics];
alphabets = new Alphabet[ numLanguages ];
vocabularySizes = new int[ numLanguages ];
betas = new double[ numLanguages ];
betaSums = new double[ numLanguages ];
languageMaxTypeCounts = new int[ numLanguages ];
languageTypeTopicCounts = new int[ numLanguages ][][];
int numInstances = training[0].size();
HashSet[] stoplists = new HashSet[ numLanguages ];
for (int language = 0; language < numLanguages; language++) {
if (training[language].size() != numInstances) {
System.err.println("Warning: language " + language + " has " +
training[language].size() + " instances, lang 0 has " +
numInstances);
}
alphabets[ language ] = training[ language ].getDataAlphabet();
vocabularySizes[ language ] = alphabets[ language ].size();
betas[language] = DEFAULT_BETA;
betaSums[language] = betas[language] * vocabularySizes[ language ];
languageTypeTopicCounts[language] = new int[ vocabularySizes[language] ][];
int[][] typeTopicCounts = languageTypeTopicCounts[language];
// Get the total number of occurrences of each word type
int[] typeTotals = new int[ vocabularySizes[language] ];
for (Instance instance : training[language]) {
if (testingIDs != null &&
testingIDs.contains(instance.getName())) {
continue;
}
FeatureSequence tokens = (FeatureSequence) instance.getData();
for (int position = 0; position < tokens.getLength(); position++) {
int type = tokens.getIndexAtPosition(position);
typeTotals[ type ]++;
}
}
/* Automatic stoplist creation, currently disabled
TreeSet<IDSorter> sortedWords = new TreeSet<IDSorter>();
for (int type = 0; type < vocabularySizes[language]; type++) {
sortedWords.add(new IDSorter(type, typeTotals[type]));
}
stoplists[language] = new HashSet<Integer>();
Iterator<IDSorter> typeIterator = sortedWords.iterator();
int totalStopwords = 0;
while (typeIterator.hasNext() && totalStopwords < numStopwords) {
stoplists[language].add(typeIterator.next().getID());
}
*/
// Allocate enough space so that we never have to worry about
// overflows: either the number of topics or the number of times
// the type occurs.
for (int type = 0; type < vocabularySizes[language]; type++) {
if (typeTotals[type] > languageMaxTypeCounts[language]) {
languageMaxTypeCounts[language] = typeTotals[type];
}
typeTopicCounts[type] = new int[ Math.min(numTopics, typeTotals[type]) ];
}
}
for (int doc = 0; doc < numInstances; doc++) {
if (testingIDs != null &&
testingIDs.contains(training[0].get(doc).getName())) {
continue;
}
Instance[] instances = new Instance[ numLanguages ];
LabelSequence[] topicSequences = new LabelSequence[ numLanguages ];
for (int language = 0; language < numLanguages; language++) {
int[][] typeTopicCounts = languageTypeTopicCounts[language];
int[] tokensPerTopic = languageTokensPerTopic[language];
instances[language] = training[language].get(doc);
FeatureSequence tokens = (FeatureSequence) instances[language].getData();
topicSequences[language] =
new LabelSequence(topicAlphabet, new int[ tokens.size() ]);
int[] topics = topicSequences[language].getFeatures();
for (int position = 0; position < tokens.size(); position++) {
int type = tokens.getIndexAtPosition(position);
int[] currentTypeTopicCounts = typeTopicCounts[ type ];
int topic = random.nextInt(numTopics);
// If the word is one of the [numStopwords] most
// frequent words, put it in a non-sampled topic.
//if (stoplists[language].contains(type)) {
// topic = -1;
//}
topics[position] = topic;
tokensPerTopic[topic]++;
// The format for these arrays is
// the topic in the rightmost bits
// the count in the remaining (left) bits.
// Since the count is in the high bits, sorting (desc)
// by the numeric value of the int guarantees that
// higher counts will be before the lower counts.
// Start by assuming that the array is either empty
// or is in sorted (descending) order.
// Here we are only adding counts, so if we find
// an existing location with the topic, we only need
// to ensure that it is not larger than its left neighbor.
int index = 0;
int currentTopic = currentTypeTopicCounts[index] & topicMask;
int currentValue;
while (currentTypeTopicCounts[index] > 0 && currentTopic != topic) {
index++;
/*
// Debugging output...
if (index >= currentTypeTopicCounts.length) {
for (int i=0; i < currentTypeTopicCounts.length; i++) {
System.out.println((currentTypeTopicCounts[i] & topicMask) + ":" +
(currentTypeTopicCounts[i] >> topicBits) + " ");
}
System.out.println(type + " " + typeTotals[type]);
}
*/
currentTopic = currentTypeTopicCounts[index] & topicMask;
}
currentValue = currentTypeTopicCounts[index] >> topicBits;
if (currentValue == 0) {
// new value is 1, so we don't have to worry about sorting
// (except by topic suffix, which doesn't matter)
currentTypeTopicCounts[index] =
(1 << topicBits) + topic;
}
else {
currentTypeTopicCounts[index] =
((currentValue + 1) << topicBits) + topic;
// Now ensure that the array is still sorted by
// bubbling this value up.
while (index > 0 &&
currentTypeTopicCounts[index] > currentTypeTopicCounts[index - 1]) {
int temp = currentTypeTopicCounts[index];
currentTypeTopicCounts[index] = currentTypeTopicCounts[index - 1];
currentTypeTopicCounts[index - 1] = temp;
index--;
}
}
}
}
TopicAssignment t = new TopicAssignment (instances, topicSequences);
data.add (t);
}
initializeHistograms();
languageSmoothingOnlyMasses = new double[ numLanguages ];
languageCachedCoefficients = new double[ numLanguages ][ numTopics ];
cacheValues();
}
/**
* Gather statistics on the size of documents
* and create histograms for use in Dirichlet hyperparameter
* optimization.
*/
private void initializeHistograms() {
int maxTokens = 0;
int totalTokens = 0;
for (int doc = 0; doc < data.size(); doc++) {
int length = 0;
for (LabelSequence sequence : data.get(doc).topicSequences) {
length += sequence.getLength();
}
if (length > maxTokens) {
maxTokens = length;
}
totalTokens += length;
}
System.err.println("max tokens: " + maxTokens);
System.err.println("total tokens: " + totalTokens);
docLengthCounts = new int[maxTokens + 1];
topicDocCounts = new int[numTopics][maxTokens + 1];
}
private void cacheValues() {
for (int language = 0; language < numLanguages; language++) {
languageSmoothingOnlyMasses[language] = 0.0;
for (int topic=0; topic < numTopics; topic++) {
languageSmoothingOnlyMasses[language] +=
alpha[topic] * betas[language] /
(languageTokensPerTopic[language][topic] + betaSums[language]);
languageCachedCoefficients[language][topic] =
alpha[topic] / (languageTokensPerTopic[language][topic] + betaSums[language]);
}
}
}
private void clearHistograms() {
Arrays.fill(docLengthCounts, 0);
for (int topic = 0; topic < topicDocCounts.length; topic++)
Arrays.fill(topicDocCounts[topic], 0);
}
public void estimate () throws IOException {
estimate (numIterations);
}
public void estimate (int iterationsThisRound) throws IOException {
long startTime = System.currentTimeMillis();
int maxIteration = iterationsSoFar + iterationsThisRound;
long totalTime = 0;
for ( ; iterationsSoFar <= maxIteration; iterationsSoFar++) {
long iterationStart = System.currentTimeMillis();
if (showTopicsInterval != 0 && iterationsSoFar != 0 && iterationsSoFar % showTopicsInterval == 0) {
System.out.println();
printTopWords (System.out, wordsPerTopic, false);
}
if (saveStateInterval != 0 && iterationsSoFar % saveStateInterval == 0) {
this.printState(new File(stateFilename + '.' + iterationsSoFar));
}
/*
if (outputModelInterval != 0 && iterations % outputModelInterval == 0) {
this.write (new File(outputModelFilename+'.'+iterations));
}
*/
// TODO this condition should also check that we have more than one sample to work with here
// (The number of samples actually obtained is not yet tracked.)
if (iterationsSoFar > burninPeriod && optimizeInterval != 0 &&
iterationsSoFar % optimizeInterval == 0) {
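				// Re-estimate the asymmetric alpha from the document/topic
				// histograms and the symmetric per-language betas from the
				// type/topic count histograms, then clear the histograms and
				// rebuild the cached sampling masses for the new values.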
alphaSum = Dirichlet.learnParameters(alpha, topicDocCounts, docLengthCounts);
optimizeBetas();
clearHistograms();
cacheValues();
}
// Loop over every document in the corpus
topicTermCount = betaTopicCount = smoothingOnlyCount = 0;
for (int doc = 0; doc < data.size(); doc++) {
sampleTopicsForOneDoc (data.get(doc),
(iterationsSoFar >= burninPeriod &&
iterationsSoFar % saveSampleInterval == 0));
}
long elapsedMillis = System.currentTimeMillis() - iterationStart;
totalTime += elapsedMillis;
if ((iterationsSoFar + 1) % 10 == 0) {
double ll = modelLogLikelihood();
System.out.println(elapsedMillis + "\t" + totalTime + "\t" +
ll);
}
else {
System.out.print(elapsedMillis + " ");
}
}
/*
long seconds = Math.round((System.currentTimeMillis() - startTime)/1000.0);
long minutes = seconds / 60; seconds %= 60;
long hours = minutes / 60; minutes %= 60;
long days = hours / 24; hours %= 24;
System.out.print ("\nTotal time: ");
if (days != 0) { System.out.print(days); System.out.print(" days "); }
if (hours != 0) { System.out.print(hours); System.out.print(" hours "); }
if (minutes != 0) { System.out.print(minutes); System.out.print(" minutes "); }
System.out.print(seconds); System.out.println(" seconds");
*/
}
public void optimizeBetas() {
for (int language = 0; language < numLanguages; language++) {
// The histogram starts at count 0, so if all of the
// tokens of the most frequent type were assigned to one topic,
// we would need to store a maxTypeCount + 1 count.
int[] countHistogram = new int[languageMaxTypeCounts[language] + 1];
// Now count the number of type/topic pairs that have
// each number of tokens.
int[][] typeTopicCounts = languageTypeTopicCounts[language];
int[] tokensPerTopic = languageTokensPerTopic[language];
int index;
for (int type = 0; type < vocabularySizes[language]; type++) {
int[] counts = typeTopicCounts[type];
index = 0;
while (index < counts.length &&
counts[index] > 0) {
int count = counts[index] >> topicBits;
countHistogram[count]++;
index++;
}
}
// Figure out how large we need to make the "observation lengths"
// histogram.
int maxTopicSize = 0;
for (int topic = 0; topic < numTopics; topic++) {
if (tokensPerTopic[topic] > maxTopicSize) {
maxTopicSize = tokensPerTopic[topic];
}
}
// Now allocate it and populate it.
int[] topicSizeHistogram = new int[maxTopicSize + 1];
for (int topic = 0; topic < numTopics; topic++) {
topicSizeHistogram[ tokensPerTopic[topic] ]++;
}
betaSums[language] = Dirichlet.learnSymmetricConcentration(countHistogram,
topicSizeHistogram,
vocabularySizes[ language ],
betaSums[language]);
betas[language] = betaSums[language] / vocabularySizes[ language ];
}
}
protected void sampleTopicsForOneDoc (TopicAssignment topicAssignment,
boolean shouldSaveState) {
int[] currentTypeTopicCounts;
int type, oldTopic, newTopic;
double topicWeightsSum;
int[] localTopicCounts = new int[numTopics];
int[] localTopicIndex = new int[numTopics];
for (int language = 0; language < numLanguages; language++) {
int[] oneDocTopics =
topicAssignment.topicSequences[language].getFeatures();
int docLength =
topicAssignment.topicSequences[language].getLength();
// populate topic counts
for (int position = 0; position < docLength; position++) {
localTopicCounts[oneDocTopics[position]]++;
}
}
// Build an array that densely lists the topics that
// have non-zero counts.
int denseIndex = 0;
for (int topic = 0; topic < numTopics; topic++) {
if (localTopicCounts[topic] != 0) {
localTopicIndex[denseIndex] = topic;
denseIndex++;
}
}
// Record the total number of non-zero topics
int nonZeroTopics = denseIndex;
for (int language = 0; language < numLanguages; language++) {
int[] oneDocTopics =
topicAssignment.topicSequences[language].getFeatures();
int docLength =
topicAssignment.topicSequences[language].getLength();
FeatureSequence tokenSequence =
(FeatureSequence) topicAssignment.instances[language].getData();
int[][] typeTopicCounts = languageTypeTopicCounts[language];
int[] tokensPerTopic = languageTokensPerTopic[language];
double beta = betas[language];
double betaSum = betaSums[language];
// Initialize the smoothing-only sampling bucket
double smoothingOnlyMass = languageSmoothingOnlyMasses[language];
//for (int topic = 0; topic < numTopics; topic++)
//smoothingOnlyMass += alpha[topic] * beta / (tokensPerTopic[topic] + betaSum);
// Initialize the cached coefficients, using only smoothing.
//cachedCoefficients = new double[ numTopics ];
//for (int topic=0; topic < numTopics; topic++)
// cachedCoefficients[topic] = alpha[topic] / (tokensPerTopic[topic] + betaSum);
double[] cachedCoefficients =
languageCachedCoefficients[language];
// Initialize the topic count/beta sampling bucket
double topicBetaMass = 0.0;
// Initialize cached coefficients and the topic/beta
// normalizing constant.
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
int n = localTopicCounts[topic];
// initialize the normalization constant for the (B * n_{t|d}) term
topicBetaMass += beta * n / (tokensPerTopic[topic] + betaSum);
// update the coefficients for the non-zero topics
cachedCoefficients[topic] = (alpha[topic] + n) / (tokensPerTopic[topic] + betaSum);
}
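			// Together these buckets give the usual sparse decomposition of the
			// sampling weight (alpha[t] + n_{t|d}) * (n_{w|t} + beta) / (tokensPerTopic[t] + betaSum)
			// as smoothingOnlyMass + topicBetaMass + topicTermMass, so the first
			// two terms can be maintained incrementally as tokens are resampled.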
double topicTermMass = 0.0;
double[] topicTermScores = new double[numTopics];
int[] topicTermIndices;
int[] topicTermValues;
int i;
double score;
// Iterate over the positions (words) in the document
for (int position = 0; position < docLength; position++) {
type = tokenSequence.getIndexAtPosition(position);
oldTopic = oneDocTopics[position];
if (oldTopic == -1) { continue; }
currentTypeTopicCounts = typeTopicCounts[type];
// Remove this token from all counts.
// Remove this topic's contribution to the
// normalizing constants
smoothingOnlyMass -= alpha[oldTopic] * beta /
(tokensPerTopic[oldTopic] + betaSum);
topicBetaMass -= beta * localTopicCounts[oldTopic] /
(tokensPerTopic[oldTopic] + betaSum);
// Decrement the local doc/topic counts
localTopicCounts[oldTopic]--;
// Maintain the dense index, if we are deleting
// the old topic
if (localTopicCounts[oldTopic] == 0) {
// First get to the dense location associated with
// the old topic.
denseIndex = 0;
// We know it's in there somewhere, so we don't
// need bounds checking.
while (localTopicIndex[denseIndex] != oldTopic) {
denseIndex++;
}
// shift all remaining dense indices to the left.
while (denseIndex < nonZeroTopics) {
if (denseIndex < localTopicIndex.length - 1) {
localTopicIndex[denseIndex] =
localTopicIndex[denseIndex + 1];
}
denseIndex++;
}
nonZeroTopics --;
}
// Decrement the global topic count totals
tokensPerTopic[oldTopic]--;
//assert(tokensPerTopic[oldTopic] >= 0) : "old Topic " + oldTopic + " below 0";
// Add the old topic's contribution back into the
// normalizing constants.
smoothingOnlyMass += alpha[oldTopic] * beta /
(tokensPerTopic[oldTopic] + betaSum);
topicBetaMass += beta * localTopicCounts[oldTopic] /
(tokensPerTopic[oldTopic] + betaSum);
// Reset the cached coefficient for this topic
cachedCoefficients[oldTopic] =
(alpha[oldTopic] + localTopicCounts[oldTopic]) /
(tokensPerTopic[oldTopic] + betaSum);
// Now go over the type/topic counts, decrementing
// where appropriate, and calculating the score
// for each topic at the same time.
int index = 0;
int currentTopic, currentValue;
boolean alreadyDecremented = false;
topicTermMass = 0.0;
while (index < currentTypeTopicCounts.length &&
currentTypeTopicCounts[index] > 0) {
currentTopic = currentTypeTopicCounts[index] & topicMask;
currentValue = currentTypeTopicCounts[index] >> topicBits;
if (! alreadyDecremented &&
currentTopic == oldTopic) {
// We're decrementing and adding up the
// sampling weights at the same time, but
// decrementing may require us to reorder
// the topics, so after we're done here,
// look at this cell in the array again.
currentValue --;
if (currentValue == 0) {
currentTypeTopicCounts[index] = 0;
}
else {
currentTypeTopicCounts[index] =
(currentValue << topicBits) + oldTopic;
}
// Shift the reduced value to the right, if necessary.
int subIndex = index;
while (subIndex < currentTypeTopicCounts.length - 1 &&
currentTypeTopicCounts[subIndex] < currentTypeTopicCounts[subIndex + 1]) {
int temp = currentTypeTopicCounts[subIndex];
currentTypeTopicCounts[subIndex] = currentTypeTopicCounts[subIndex + 1];
currentTypeTopicCounts[subIndex + 1] = temp;
subIndex++;
}
alreadyDecremented = true;
}
else {
score =
cachedCoefficients[currentTopic] * currentValue;
topicTermMass += score;
topicTermScores[index] = score;
index++;
}
}
double sample = random.nextUniform() * (smoothingOnlyMass + topicBetaMass + topicTermMass);
double origSample = sample;
// Make sure it actually gets set
newTopic = -1;
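				// The sample falls into one of three buckets: the word-specific
				// topic mass (topics where this word already has a non-zero count),
				// the document/topic "beta" mass, or the smoothing-only mass.
				// Each bucket is handled by a separate branch below.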
if (sample < topicTermMass) {
//topicTermCount++;
i = -1;
while (sample > 0) {
i++;
sample -= topicTermScores[i];
}
newTopic = currentTypeTopicCounts[i] & topicMask;
currentValue = currentTypeTopicCounts[i] >> topicBits;
currentTypeTopicCounts[i] = ((currentValue + 1) << topicBits) + newTopic;
// Bubble the new value up, if necessary
while (i > 0 &&
currentTypeTopicCounts[i] > currentTypeTopicCounts[i - 1]) {
int temp = currentTypeTopicCounts[i];
currentTypeTopicCounts[i] = currentTypeTopicCounts[i - 1];
currentTypeTopicCounts[i - 1] = temp;
i--;
}
}
else {
sample -= topicTermMass;
if (sample < topicBetaMass) {
//betaTopicCount++;
sample /= beta;
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
sample -= localTopicCounts[topic] /
(tokensPerTopic[topic] + betaSum);
if (sample <= 0.0) {
newTopic = topic;
break;
}
}
}
else {
//smoothingOnlyCount++;
sample -= topicBetaMass;
sample /= beta;
newTopic = 0;
sample -= alpha[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
while (sample > 0.0) {
newTopic++;
sample -= alpha[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
}
}
// Move to the position for the new topic,
// which may be the first empty position if this
// is a new topic for this word.
index = 0;
while (currentTypeTopicCounts[index] > 0 &&
(currentTypeTopicCounts[index] & topicMask) != newTopic) {
index++;
}
// index should now be set to the position of the new topic,
// which may be an empty cell at the end of the list.
if (currentTypeTopicCounts[index] == 0) {
// inserting a new topic, guaranteed to be in
// order w.r.t. count, if not topic.
currentTypeTopicCounts[index] = (1 << topicBits) + newTopic;
}
else {
currentValue = currentTypeTopicCounts[index] >> topicBits;
currentTypeTopicCounts[index] = ((currentValue + 1) << topicBits) + newTopic;
// Bubble the increased value left, if necessary
while (index > 0 &&
currentTypeTopicCounts[index] > currentTypeTopicCounts[index - 1]) {
int temp = currentTypeTopicCounts[index];
currentTypeTopicCounts[index] = currentTypeTopicCounts[index - 1];
currentTypeTopicCounts[index - 1] = temp;
index--;
}
}
}
if (newTopic == -1) {
System.err.println("PolylingualTopicModel sampling error: "+ origSample + " " + sample + " " + smoothingOnlyMass + " " +
topicBetaMass + " " + topicTermMass);
newTopic = numTopics-1; // TODO is this appropriate
//throw new IllegalStateException ("PolylingualTopicModel: New topic not sampled.");
}
//assert(newTopic != -1);
// Put that new topic into the counts
oneDocTopics[position] = newTopic;
smoothingOnlyMass -= alpha[newTopic] * beta /
(tokensPerTopic[newTopic] + betaSum);
topicBetaMass -= beta * localTopicCounts[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
localTopicCounts[newTopic]++;
// If this is a new topic for this document,
// add the topic to the dense index.
if (localTopicCounts[newTopic] == 1) {
// First find the point where we
// should insert the new topic by going to
// the end (which is the only reason we're keeping
// track of the number of non-zero
// topics) and working backwards
denseIndex = nonZeroTopics;
while (denseIndex > 0 &&
localTopicIndex[denseIndex - 1] > newTopic) {
localTopicIndex[denseIndex] =
localTopicIndex[denseIndex - 1];
denseIndex--;
}
localTopicIndex[denseIndex] = newTopic;
nonZeroTopics++;
}
tokensPerTopic[newTopic]++;
// update the coefficients for the non-zero topics
cachedCoefficients[newTopic] =
(alpha[newTopic] + localTopicCounts[newTopic]) /
(tokensPerTopic[newTopic] + betaSum);
smoothingOnlyMass += alpha[newTopic] * beta /
(tokensPerTopic[newTopic] + betaSum);
topicBetaMass += beta * localTopicCounts[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
// Save the smoothing-only mass to the global cache
languageSmoothingOnlyMasses[language] = smoothingOnlyMass;
}
}
if (shouldSaveState) {
// Update the document-topic count histogram,
// for dirichlet estimation
int totalLength = 0;
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
topicDocCounts[topic][ localTopicCounts[topic] ]++;
totalLength += localTopicCounts[topic];
}
docLengthCounts[ totalLength ]++;
}
}
public void printTopWords (File file, int numWords, boolean useNewLines) throws IOException {
PrintStream out = new PrintStream (file);
printTopWords(out, numWords, useNewLines);
out.close();
}
public void printTopWords (PrintStream out, int numWords, boolean usingNewLines) {
TreeSet[][] languageTopicSortedWords = new TreeSet[numLanguages][numTopics];
for (int language = 0; language < numLanguages; language++) {
TreeSet[] topicSortedWords = languageTopicSortedWords[language];
int[][] typeTopicCounts = languageTypeTopicCounts[language];
for (int topic = 0; topic < numTopics; topic++) {
topicSortedWords[topic] = new TreeSet<IDSorter>();
}
for (int type = 0; type < vocabularySizes[language]; type++) {
int[] topicCounts = typeTopicCounts[type];
int index = 0;
while (index < topicCounts.length &&
topicCounts[index] > 0) {
int topic = topicCounts[index] & topicMask;
int count = topicCounts[index] >> topicBits;
topicSortedWords[topic].add(new IDSorter(type, count));
index++;
}
}
}
for (int topic = 0; topic < numTopics; topic++) {
out.println (topic + "\t" + formatter.format(alpha[topic]));
for (int language = 0; language < numLanguages; language++) {
out.print(" " + language + "\t" + languageTokensPerTopic[language][topic] + "\t" + betas[language] + "\t");
TreeSet<IDSorter> sortedWords = languageTopicSortedWords[language][topic];
Alphabet alphabet = alphabets[language];
				int word = 0;
Iterator<IDSorter> iterator = sortedWords.iterator();
while (iterator.hasNext() && word < numWords) {
IDSorter info = iterator.next();
out.print(alphabet.lookupObject(info.getID()) + " ");
word++;
}
out.println();
}
}
}
public void printDocumentTopics (File f) throws IOException {
printDocumentTopics (new PrintWriter (f, "UTF-8") );
}
public void printDocumentTopics (PrintWriter pw) {
printDocumentTopics (pw, 0.0, -1);
}
/**
* @param pw A print writer
* @param threshold Only print topics with proportion greater than this number
* @param max Print no more than this many topics
*/
public void printDocumentTopics (PrintWriter pw, double threshold, int max) {
pw.print ("#doc source topic proportion ...\n");
int docLength;
int[] topicCounts = new int[ numTopics ];
IDSorter[] sortedTopics = new IDSorter[ numTopics ];
for (int topic = 0; topic < numTopics; topic++) {
// Initialize the sorters with dummy values
sortedTopics[topic] = new IDSorter(topic, topic);
}
if (max < 0 || max > numTopics) {
max = numTopics;
}
for (int di = 0; di < data.size(); di++) {
pw.print (di); pw.print (' ');
int totalLength = 0;
for (int language = 0; language < numLanguages; language++) {
LabelSequence topicSequence = (LabelSequence) data.get(di).topicSequences[language];
int[] currentDocTopics = topicSequence.getFeatures();
docLength = topicSequence.getLength();
totalLength += docLength;
// Count up the tokens
for (int token=0; token < docLength; token++) {
topicCounts[ currentDocTopics[token] ]++;
}
}
// And normalize
for (int topic = 0; topic < numTopics; topic++) {
sortedTopics[topic].set(topic, (float) topicCounts[topic] / totalLength);
}
Arrays.sort(sortedTopics);
for (int i = 0; i < max; i++) {
if (sortedTopics[i].getWeight() < threshold) { break; }
pw.print (sortedTopics[i].getID() + " " +
sortedTopics[i].getWeight() + " ");
}
pw.print (" \n");
Arrays.fill(topicCounts, 0);
}
}
public void printState (File f) throws IOException {
PrintStream out =
new PrintStream(new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(f))),
false, "UTF-8");
printState(out);
out.close();
}
public void printState (PrintStream out) {
out.println ("#doc lang pos typeindex type topic");
for (int doc = 0; doc < data.size(); doc++) {
for (int language =0; language < numLanguages; language++) {
FeatureSequence tokenSequence = (FeatureSequence) data.get(doc).instances[language].getData();
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequences[language];
for (int pi = 0; pi < topicSequence.getLength(); pi++) {
int type = tokenSequence.getIndexAtPosition(pi);
int topic = topicSequence.getIndexAtPosition(pi);
out.print(doc); out.print(' ');
out.print(language); out.print(' ');
out.print(pi); out.print(' ');
out.print(type); out.print(' ');
out.print(alphabets[language].lookupObject(type)); out.print(' ');
out.print(topic); out.println();
}
}
}
}
public double modelLogLikelihood() {
double logLikelihood = 0.0;
int nonZeroTopics;
// The likelihood of the model is a combination of a
// Dirichlet-multinomial for the words in each topic
// and a Dirichlet-multinomial for the topics in each
// document.
// The likelihood function of a dirichlet multinomial is
// Gamma( sum_i alpha_i ) prod_i Gamma( alpha_i + N_i )
// prod_i Gamma( alpha_i ) Gamma( sum_i (alpha_i + N_i) )
// So the log likelihood is
// logGamma ( sum_i alpha_i ) - logGamma ( sum_i (alpha_i + N_i) ) +
// sum_i [ logGamma( alpha_i + N_i) - logGamma( alpha_i ) ]
// Do the documents first
int[] topicCounts = new int[numTopics];
double[] topicLogGammas = new double[numTopics];
int[] docTopics;
for (int topic=0; topic < numTopics; topic++) {
topicLogGammas[ topic ] = Dirichlet.logGammaStirling( alpha[topic] );
}
for (int doc=0; doc < data.size(); doc++) {
int totalLength = 0;
for (int language = 0; language < numLanguages; language++) {
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequences[language];
int[] currentDocTopics = topicSequence.getFeatures();
totalLength += topicSequence.getLength();
// Count up the tokens
for (int token=0; token < topicSequence.getLength(); token++) {
topicCounts[ currentDocTopics[token] ]++;
}
}
for (int topic=0; topic < numTopics; topic++) {
if (topicCounts[topic] > 0) {
logLikelihood += (Dirichlet.logGammaStirling(alpha[topic] + topicCounts[topic]) -
topicLogGammas[ topic ]);
}
}
// subtract the (count + parameter) sum term
logLikelihood -= Dirichlet.logGammaStirling(alphaSum + totalLength);
Arrays.fill(topicCounts, 0);
}
// add the parameter sum term
logLikelihood += data.size() * Dirichlet.logGammaStirling(alphaSum);
// And the topics
for (int language = 0; language < numLanguages; language++) {
int[][] typeTopicCounts = languageTypeTopicCounts[language];
int[] tokensPerTopic = languageTokensPerTopic[language];
double beta = betas[language];
// Count the number of type-topic pairs
int nonZeroTypeTopics = 0;
for (int type=0; type < vocabularySizes[language]; type++) {
// reuse this array as a pointer
topicCounts = typeTopicCounts[type];
int index = 0;
while (index < topicCounts.length &&
topicCounts[index] > 0) {
int topic = topicCounts[index] & topicMask;
int count = topicCounts[index] >> topicBits;
nonZeroTypeTopics++;
logLikelihood += Dirichlet.logGammaStirling(beta + count);
if (Double.isNaN(logLikelihood)) {
System.out.println(count);
System.exit(1);
}
index++;
}
}
			for (int topic=0; topic < numTopics; topic++) {
				// The topic-word Dirichlet for this language is over its whole
				// vocabulary, so the concentration sum is
				// betaSums[language] = beta * vocabularySizes[language].
				logLikelihood -=
					Dirichlet.logGammaStirling( betaSums[language] +
												tokensPerTopic[ topic ] );

				if (Double.isNaN(logLikelihood)) {
					System.out.println("after topic " + topic + " " + tokensPerTopic[ topic ]);
					System.exit(1);
				}

			}
	
			// logGamma(betaSum) appears once for each topic, and logGamma(beta)
			// once for each non-zero type/topic pair.
			logLikelihood += 
				(Dirichlet.logGammaStirling(betaSums[language]) * numTopics) -
				(Dirichlet.logGammaStirling(beta) * nonZeroTypeTopics);
}
if (Double.isNaN(logLikelihood)) {
System.out.println("at the end");
System.exit(1);
}
return logLikelihood;
}
public static void main (String[] args) throws IOException {
if (args.length < 4) {
System.err.println("Usage: PolylingualTopicModel [num topics] [file to save state] [testing IDs file] [language 0 instances] ...");
System.exit(1);
}
int numTopics = Integer.parseInt(args[0]);
String stateFileName = args[1];
File testingIDsFile = new File(args[2]);
InstanceList[] training = new InstanceList[ args.length - 3 ];
for (int language=0; language < training.length; language++) {
training[language] = InstanceList.load(new File(args[language + 3]));
System.err.println("loaded " + args[language + 3]);
}
PolylingualTopicModel lda = new PolylingualTopicModel (numTopics, 2.0);
lda.printLogLikelihood = true;
lda.setTopicDisplay(50, 7);
lda.loadTestingIDs(testingIDsFile);
lda.addInstances(training);
lda.setSaveState(200, stateFileName);
lda.estimate();
lda.printState(new File(stateFileName));
}
}
| 39,598 | 30.009397 | 134 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/TopicalNGrams.java | /* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.topics;
import java.util.Arrays;
import java.io.*;
import cc.mallet.types.*;
import cc.mallet.util.Randoms;
/**
* Like Latent Dirichlet Allocation, but with integrated phrase discovery.
* @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
* based on C code by Xuerui Wang.
*/
public class TopicalNGrams {
int numTopics;
Alphabet uniAlphabet;
Alphabet biAlphabet;
double alpha, beta, gamma, delta, tAlpha, vBeta, vGamma, delta1, delta2;
InstanceList ilist; // containing FeatureSequenceWithBigrams in the data field of each instance
int[][] topics; // {0...T-1}, the topic index, indexed by <document index, sequence index>
int[][] grams; // {0,1}, the bigram status, indexed by <document index, sequence index> TODO: Make this boolean?
int numTypes; // number of unique unigrams
int numBitypes; // number of unique bigrams
int numTokens; // total number of word occurrences
// "totalNgram"
int biTokens; // total number of tokens currently generated as bigrams (only used for progress messages)
// "docTopic"
int[][] docTopicCounts; // indexed by <document index, topic index>
// Used to calculate p(x|w,t). "ngramCount"
int[][][] typeNgramTopicCounts; // indexed by <feature index, ngram status, topic index>
// Used to calculate p(w|t) and p(w|t,w), "topicWord" and "topicNgramWord"
int[][] unitypeTopicCounts; // indexed by <feature index, topic index>
int[][] bitypeTopicCounts; // index by <bifeature index, topic index>
// "sumWords"
int[] tokensPerTopic; // indexed by <topic index>
// "sumNgramWords"
	int[][] bitokensPerTopic; // indexed by <previous feature index, topic index>: counts of bigram tokens conditioned on the previous word
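	// Summary: each token carries a topic assignment and a binary "gram"
	// status indicating whether it forms a bigram with the previous token.
	// Unigram emissions are scored from unitypeTopicCounts / tokensPerTopic
	// (smoothed by beta), bigram emissions from bitypeTopicCounts /
	// bitokensPerTopic (smoothed by gamma), and the gram-status choice from
	// typeNgramTopicCounts (smoothed by delta1 and delta2).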
public TopicalNGrams (int numberOfTopics)
{
this (numberOfTopics, 50.0, 0.01, 0.01, 0.03, 0.2, 1000);
}
public TopicalNGrams (int numberOfTopics, double alphaSum, double beta, double gamma, double delta,
double delta1, double delta2)
{
this.numTopics = numberOfTopics;
this.alpha = alphaSum / numTopics; // smoothing over the choice of topic
this.beta = beta; // smoothing over the choice of unigram words
this.gamma = gamma; // smoothing over the choice of bigram words
this.delta = delta; // smoothing over the choice of unigram/bigram generation
this.delta1 = delta1; // TODO: Clean this up.
this.delta2 = delta2;
System.out.println("alpha :"+alphaSum);
System.out.println("beta :"+beta);
System.out.println("gamma :"+gamma);
System.out.println("delta :"+delta);
System.out.println("delta1 :"+delta1);
System.out.println("delta2 :"+delta2);
}
public void estimate (InstanceList documents, int numIterations, int showTopicsInterval,
int outputModelInterval, String outputModelFilename,
Randoms r)
{
ilist = documents;
uniAlphabet = ilist.getDataAlphabet();
biAlphabet = ((FeatureSequenceWithBigrams)ilist.get(0).getData()).getBiAlphabet();
numTypes = uniAlphabet.size();
numBitypes = biAlphabet.size();
int numDocs = ilist.size();
topics = new int[numDocs][];
grams = new int[numDocs][];
docTopicCounts = new int[numDocs][numTopics];
typeNgramTopicCounts = new int[numTypes][2][numTopics];
unitypeTopicCounts = new int[numTypes][numTopics];
bitypeTopicCounts = new int[numBitypes][numTopics];
tokensPerTopic = new int[numTopics];
bitokensPerTopic = new int[numTypes][numTopics];
tAlpha = alpha * numTopics;
vBeta = beta * numTypes;
vGamma = gamma * numTypes;
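		// vBeta and vGamma are the Dirichlet concentration sums over the
		// vocabulary (beta * numTypes and gamma * numTypes); they appear as
		// denominators of the smoothed word probabilities during sampling.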
long startTime = System.currentTimeMillis();
// Initialize with random assignments of tokens to topics
// and finish allocating this.topics and this.tokens
int topic, gram, seqLen, fi;
for (int di = 0; di < numDocs; di++) {
FeatureSequenceWithBigrams fs = (FeatureSequenceWithBigrams) ilist.get(di).getData();
seqLen = fs.getLength();
numTokens += seqLen;
topics[di] = new int[seqLen];
grams[di] = new int[seqLen];
// Randomly assign tokens to topics
int prevFi = -1, prevTopic = -1;
for (int si = 0; si < seqLen; si++) {
// randomly sample a topic for the word at position si
topic = r.nextInt(numTopics);
// if a bigram is allowed at position si, then sample a gram status for it.
gram = (fs.getBiIndexAtPosition(si) == -1 ? 0 : r.nextInt(2));
if (gram != 0) biTokens++;
topics[di][si] = topic;
grams[di][si] = gram;
docTopicCounts[di][topic]++;
fi = fs.getIndexAtPosition(si);
if (prevFi != -1)
typeNgramTopicCounts[prevFi][gram][prevTopic]++;
if (gram == 0) {
unitypeTopicCounts[fi][topic]++;
tokensPerTopic[topic]++;
} else {
bitypeTopicCounts[fs.getBiIndexAtPosition(si)][topic]++;
bitokensPerTopic[prevFi][topic]++;
}
prevFi = fi; prevTopic = topic;
}
}
for (int iterations = 0; iterations < numIterations; iterations++) {
sampleTopicsForAllDocs (r);
if (iterations % 10 == 0) System.out.print (iterations); else System.out.print (".");
System.out.flush();
if (showTopicsInterval != 0 && iterations % showTopicsInterval == 0 && iterations > 0) {
System.out.println ();
printTopWords (5, false);
}
if (outputModelInterval != 0 && iterations % outputModelInterval == 0 && iterations > 0) {
this.write (new File(outputModelFilename+'.'+iterations));
}
}
System.out.println ("\nTotal time (sec): " + ((System.currentTimeMillis() - startTime)/1000.0));
}
/* One iteration of Gibbs sampling, across all documents. */
private void sampleTopicsForAllDocs (Randoms r)
{
double[] uniTopicWeights = new double[numTopics];
double[] biTopicWeights = new double[numTopics*2];
// Loop over every word in the corpus
for (int di = 0; di < topics.length; di++) {
sampleTopicsForOneDoc ((FeatureSequenceWithBigrams)ilist.get(di).getData(),
topics[di], grams[di], docTopicCounts[di],
uniTopicWeights, biTopicWeights,
r);
}
}
private void sampleTopicsForOneDoc (FeatureSequenceWithBigrams oneDocTokens,
int[] oneDocTopics, int[] oneDocGrams,
int[] oneDocTopicCounts, // indexed by topic index
double[] uniTopicWeights, // length==numTopics
double[] biTopicWeights, // length==numTopics*2: joint topic/gram sampling
Randoms r)
{
int[] currentTypeTopicCounts;
int[] currentBitypeTopicCounts;
int[] previousBitokensPerTopic;
int type, bitype, oldGram, nextGram, newGram, oldTopic, newTopic;
double topicWeightsSum, tw;
// xxx int docLen = oneDocTokens.length;
int docLen = oneDocTokens.getLength();
// Iterate over the positions (words) in the document
for (int si = 0; si < docLen; si++) {
type = oneDocTokens.getIndexAtPosition(si);
bitype = oneDocTokens.getBiIndexAtPosition(si);
//if (bitype == -1) System.out.println ("biblock "+si+" at "+uniAlphabet.lookupObject(type));
oldTopic = oneDocTopics[si];
oldGram = oneDocGrams[si];
nextGram = (si == docLen-1) ? -1 : oneDocGrams[si+1];
//nextGram = (si == docLen-1) ? -1 : (oneDocTokens.getBiIndexAtPosition(si+1) == -1 ? 0 : 1);
boolean bigramPossible = (bitype != -1);
assert (!(!bigramPossible && oldGram == 1));
if (!bigramPossible) {
// Remove this token from all counts
oneDocTopicCounts[oldTopic]--;
tokensPerTopic[oldTopic]--;
unitypeTopicCounts[type][oldTopic]--;
if (si != docLen-1) {
typeNgramTopicCounts[type][nextGram][oldTopic]--;
assert (typeNgramTopicCounts[type][nextGram][oldTopic] >= 0);
}
assert (oneDocTopicCounts[oldTopic] >= 0);
assert (tokensPerTopic[oldTopic] >= 0);
assert (unitypeTopicCounts[type][oldTopic] >= 0);
// Build a distribution over topics for this token
Arrays.fill (uniTopicWeights, 0.0);
topicWeightsSum = 0;
currentTypeTopicCounts = unitypeTopicCounts[type];
for (int ti = 0; ti < numTopics; ti++) {
tw = ((currentTypeTopicCounts[ti] + beta) / (tokensPerTopic[ti] + vBeta))
					* ((oneDocTopicCounts[ti] + alpha)); // additional term is constant across all topics
topicWeightsSum += tw;
uniTopicWeights[ti] = tw;
}
// Sample a topic assignment from this distribution
newTopic = r.nextDiscrete (uniTopicWeights, topicWeightsSum);
// Put that new topic into the counts
oneDocTopics[si] = newTopic;
oneDocTopicCounts[newTopic]++;
unitypeTopicCounts[type][newTopic]++;
tokensPerTopic[newTopic]++;
if (si != docLen-1)
typeNgramTopicCounts[type][nextGram][newTopic]++;
} else {
// Bigram is possible
int prevType = oneDocTokens.getIndexAtPosition(si-1);
int prevTopic = oneDocTopics[si-1];
// Remove this token from all counts
oneDocTopicCounts[oldTopic]--;
typeNgramTopicCounts[prevType][oldGram][prevTopic]--;
if (si != docLen-1)
typeNgramTopicCounts[type][nextGram][oldTopic]--;
if (oldGram == 0) {
unitypeTopicCounts[type][oldTopic]--;
tokensPerTopic[oldTopic]--;
} else {
bitypeTopicCounts[bitype][oldTopic]--;
bitokensPerTopic[prevType][oldTopic]--;
biTokens--;
}
assert (oneDocTopicCounts[oldTopic] >= 0);
assert (typeNgramTopicCounts[prevType][oldGram][prevTopic] >= 0);
assert (si == docLen-1 || typeNgramTopicCounts[type][nextGram][oldTopic] >= 0);
assert (unitypeTopicCounts[type][oldTopic] >= 0);
assert (tokensPerTopic[oldTopic] >= 0);
assert (bitypeTopicCounts[bitype][oldTopic] >= 0);
assert (bitokensPerTopic[prevType][oldTopic] >= 0);
assert (biTokens >= 0);
// Build a joint distribution over topics and ngram-status for this token
Arrays.fill (biTopicWeights, 0.0);
topicWeightsSum = 0;
currentTypeTopicCounts = unitypeTopicCounts[type];
currentBitypeTopicCounts = bitypeTopicCounts[bitype];
previousBitokensPerTopic = bitokensPerTopic[prevType];
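				// biTopicWeights interleaves two outcomes per topic: index 2*ti
				// holds the unigram outcome and 2*ti+1 the bigram outcome, so the
				// sampled index is decoded below as gram = index % 2, topic = index / 2.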
for (int ti = 0; ti < numTopics; ti++) {
newTopic = ti << 1; // just using this variable as an index into [ti*2+gram]
// The unigram outcome
tw =
(currentTypeTopicCounts[ti] + beta) / (tokensPerTopic[ti] + vBeta)
* (oneDocTopicCounts[ti] + alpha)
* (typeNgramTopicCounts[prevType][0][prevTopic] + delta1);
topicWeightsSum += tw;
biTopicWeights[newTopic] = tw;
// The bigram outcome
newTopic++;
tw =
(currentBitypeTopicCounts[ti] + gamma) / (previousBitokensPerTopic[ti] + vGamma)
* (oneDocTopicCounts[ti] + alpha)
* (typeNgramTopicCounts[prevType][1][prevTopic] + delta2);
topicWeightsSum += tw;
biTopicWeights[newTopic] = tw;
}
// Sample a topic assignment from this distribution
newTopic = r.nextDiscrete (biTopicWeights, topicWeightsSum);
// Put that new topic into the counts
newGram = newTopic % 2;
newTopic /= 2;
// Put that new topic into the counts
oneDocTopics[si] = newTopic;
oneDocGrams[si] = newGram;
oneDocTopicCounts[newTopic]++;
typeNgramTopicCounts[prevType][newGram][prevTopic]++;
if (si != docLen-1)
typeNgramTopicCounts[type][nextGram][newTopic]++;
if (newGram == 0) {
unitypeTopicCounts[type][newTopic]++;
tokensPerTopic[newTopic]++;
} else {
bitypeTopicCounts[bitype][newTopic]++;
bitokensPerTopic[prevType][newTopic]++;
biTokens++;
}
}
}
}
public void printTopWords (int numWords, boolean useNewLines)
{
class WordProb implements Comparable {
int wi; double p;
public WordProb (int wi, double p) { this.wi = wi; this.p = p; }
public final int compareTo (Object o2) {
if (p > ((WordProb)o2).p)
return -1;
else if (p == ((WordProb)o2).p)
return 0;
else return 1;
}
}
for (int ti = 0; ti < numTopics; ti++) {
// Unigrams
WordProb[] wp = new WordProb[numTypes];
for (int wi = 0; wi < numTypes; wi++)
wp[wi] = new WordProb (wi, (double)unitypeTopicCounts[wi][ti]);
Arrays.sort (wp);
int numToPrint = Math.min(wp.length, numWords);
if (useNewLines) {
System.out.println ("\nTopic "+ti+" unigrams");
for (int i = 0; i < numToPrint; i++)
System.out.println (uniAlphabet.lookupObject(wp[i].wi).toString()
+ " " + wp[i].p/tokensPerTopic[ti]);
} else {
System.out.print ("Topic "+ti+": ");
for (int i = 0; i < numToPrint; i++)
System.out.print (uniAlphabet.lookupObject(wp[i].wi).toString() + " ");
}
// Bigrams
/*
wp = new WordProb[numBitypes];
int bisum = 0;
for (int wi = 0; wi < numBitypes; wi++) {
wp[wi] = new WordProb (wi, ((double)bitypeTopicCounts[wi][ti]));
bisum += bitypeTopicCounts[wi][ti];
}
Arrays.sort (wp);
numToPrint = Math.min(wp.length, numWords);
if (useNewLines) {
System.out.println ("\nTopic "+ti+" bigrams");
for (int i = 0; i < numToPrint; i++)
System.out.println (biAlphabet.lookupObject(wp[i].wi).toString() + " " + wp[i].p/bisum);
} else {
System.out.print (" ");
for (int i = 0; i < numToPrint; i++)
System.out.print (biAlphabet.lookupObject(wp[i].wi).toString() + " ");
System.out.println();
}
*/
// Ngrams
AugmentableFeatureVector afv = new AugmentableFeatureVector(new Alphabet(), 10000, false);
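			// Reconstruct phrases by scanning each document backwards: a token
			// with gram == 1 ends a phrase, which is extended leftwards until
			// the token with gram == 0 that starts the phrase is included.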
for (int di = 0; di < topics.length; di++) {
FeatureSequenceWithBigrams fs = (FeatureSequenceWithBigrams) ilist.get(di).getData();
for (int si = topics[di].length-1; si >= 0; si--) {
if (topics[di][si] == ti && grams[di][si] == 1) {
String gramString = uniAlphabet.lookupObject(fs.getIndexAtPosition(si)).toString();
while (grams[di][si] == 1 && --si >= 0)
gramString = uniAlphabet.lookupObject(fs.getIndexAtPosition(si)).toString() + "_" + gramString;
afv.add(gramString, 1.0);
}
}
}
//System.out.println ("pre-sorting");
int numNgrams = afv.numLocations();
//System.out.println ("post-sorting "+numNgrams);
wp = new WordProb[numNgrams];
int ngramSum = 0;
for (int loc = 0; loc < numNgrams; loc++) {
wp[loc] = new WordProb (afv.indexAtLocation(loc), afv.valueAtLocation(loc));
ngramSum += wp[loc].p;
}
Arrays.sort (wp);
int numUnitypeTokens = 0, numBitypeTokens = 0, numUnitypeTypes = 0, numBitypeTypes = 0;
for (int fi = 0; fi < numTypes; fi++) {
numUnitypeTokens += unitypeTopicCounts[fi][ti];
if (unitypeTopicCounts[fi][ti] != 0)
numUnitypeTypes++;
}
for (int fi = 0; fi < numBitypes; fi++) {
numBitypeTokens += bitypeTopicCounts[fi][ti];
if (bitypeTopicCounts[fi][ti] != 0)
numBitypeTypes++;
}
if (useNewLines) {
System.out.println ("\nTopic "+ti+" unigrams "+numUnitypeTokens+"/"+numUnitypeTypes+" bigrams "+numBitypeTokens+"/"+numBitypeTypes
+" phrases "+Math.round(afv.oneNorm())+"/"+numNgrams);
for (int i = 0; i < Math.min(numNgrams,numWords); i++)
System.out.println (afv.getAlphabet().lookupObject(wp[i].wi).toString() + " " + wp[i].p/ngramSum);
} else {
System.out.print (" (unigrams "+numUnitypeTokens+"/"+numUnitypeTypes+" bigrams "+numBitypeTokens+"/"+numBitypeTypes
+" phrases "+Math.round(afv.oneNorm())+"/"+numNgrams+")\n ");
//System.out.print (" (unique-ngrams="+numNgrams+" ngram-count="+Math.round(afv.oneNorm())+")\n ");
for (int i = 0; i < Math.min(numNgrams, numWords); i++)
System.out.print (afv.getAlphabet().lookupObject(wp[i].wi).toString() + " ");
System.out.println();
}
}
}
public void printDocumentTopics (File f) throws IOException
{
printDocumentTopics (new PrintWriter (new FileWriter (f)));
}
	public void printDocumentTopics (PrintWriter pw) {
		printDocumentTopics (pw, 0.0, -1);
	}
public void printDocumentTopics (PrintWriter pw, double threshold, int max)
{
pw.println ("#doc source topic proportions");
int docLen;
double topicDist[] = new double[topics.length];
for (int di = 0; di < topics.length; di++) {
pw.print (di); pw.print (' ');
pw.print (ilist.get(di).getSource().toString()); pw.print (' ');
docLen = topics[di].length;
for (int ti = 0; ti < numTopics; ti++)
topicDist[ti] = (((float)docTopicCounts[di][ti])/docLen);
if (max < 0) max = numTopics;
for (int tp = 0; tp < max; tp++) {
double maxvalue = 0;
int maxindex = -1;
for (int ti = 0; ti < numTopics; ti++)
if (topicDist[ti] > maxvalue) {
maxvalue = topicDist[ti];
maxindex = ti;
}
if (maxindex == -1 || topicDist[maxindex] < threshold)
break;
pw.print (maxindex+" "+topicDist[maxindex]+" ");
topicDist[maxindex] = 0;
}
pw.println (' ');
}
}
public void printState (File f) throws IOException
{
PrintWriter writer = new PrintWriter (new FileWriter(f));
printState (writer);
writer.close();
}
public void printState (PrintWriter pw)
{
pw.println ("#doc pos typeindex type bigrampossible? topic bigram");
for (int di = 0; di < topics.length; di++) {
FeatureSequenceWithBigrams fs = (FeatureSequenceWithBigrams) ilist.get(di).getData();
for (int si = 0; si < topics[di].length; si++) {
int type = fs.getIndexAtPosition(si);
pw.print(di); pw.print(' ');
pw.print(si); pw.print(' ');
pw.print(type); pw.print(' ');
pw.print(uniAlphabet.lookupObject(type)); pw.print(' ');
pw.print(fs.getBiIndexAtPosition(si)==-1 ? 0 : 1); pw.print(' ');
pw.print(topics[di][si]); pw.print(' ');
pw.print(grams[di][si]); pw.println();
}
}
}
public void write (File f) {
try {
ObjectOutputStream oos = new ObjectOutputStream (new FileOutputStream(f));
oos.writeObject(this);
oos.close();
}
catch (IOException e) {
System.err.println("Exception writing file " + f + ": " + e);
}
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
private void writeIntArray2 (int[][] a, ObjectOutputStream out) throws IOException {
out.writeInt (a.length);
int d2 = a[0].length;
out.writeInt (d2);
for (int i = 0; i < a.length; i++)
for (int j = 0; j < d2; j++)
out.writeInt (a[i][j]);
}
private int[][] readIntArray2 (ObjectInputStream in) throws IOException {
int d1 = in.readInt();
int d2 = in.readInt();
int[][] a = new int[d1][d2];
for (int i = 0; i < d1; i++)
for (int j = 0; j < d2; j++)
a[i][j] = in.readInt();
return a;
}
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (ilist);
out.writeInt (numTopics);
out.writeDouble (alpha);
out.writeDouble (beta);
out.writeDouble (gamma);
		out.writeDouble (delta);
		out.writeDouble (delta1);
		out.writeDouble (delta2);
out.writeDouble (tAlpha);
out.writeDouble (vBeta);
out.writeDouble (vGamma);
out.writeInt (numTypes);
out.writeInt (numBitypes);
out.writeInt (numTokens);
out.writeInt (biTokens);
for (int di = 0; di < topics.length; di ++)
for (int si = 0; si < topics[di].length; si++)
out.writeInt (topics[di][si]);
for (int di = 0; di < topics.length; di ++)
for (int si = 0; si < topics[di].length; si++)
out.writeInt (grams[di][si]);
writeIntArray2 (docTopicCounts, out);
for (int fi = 0; fi < numTypes; fi++)
for (int n = 0; n < 2; n++)
for (int ti = 0; ti < numTopics; ti++)
out.writeInt (typeNgramTopicCounts[fi][n][ti]);
writeIntArray2 (unitypeTopicCounts, out);
writeIntArray2 (bitypeTopicCounts, out);
for (int ti = 0; ti < numTopics; ti++)
out.writeInt (tokensPerTopic[ti]);
writeIntArray2 (bitokensPerTopic, out);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int featuresLength;
int version = in.readInt ();
ilist = (InstanceList) in.readObject ();
numTopics = in.readInt();
alpha = in.readDouble();
beta = in.readDouble();
gamma = in.readDouble();
		delta = in.readDouble();
		delta1 = in.readDouble();
		delta2 = in.readDouble();
tAlpha = in.readDouble();
vBeta = in.readDouble();
vGamma = in.readDouble();
numTypes = in.readInt();
numBitypes = in.readInt();
numTokens = in.readInt();
biTokens = in.readInt();
int numDocs = ilist.size();
topics = new int[numDocs][];
grams = new int[numDocs][];
for (int di = 0; di < ilist.size(); di++) {
int docLen = ((FeatureSequence)ilist.get(di).getData()).getLength();
topics[di] = new int[docLen];
for (int si = 0; si < docLen; si++)
topics[di][si] = in.readInt();
}
for (int di = 0; di < ilist.size(); di++) {
int docLen = ((FeatureSequence)ilist.get(di).getData()).getLength();
grams[di] = new int[docLen];
for (int si = 0; si < docLen; si++)
grams[di][si] = in.readInt();
}
docTopicCounts = readIntArray2 (in);
typeNgramTopicCounts = new int[numTypes][2][numTopics];
for (int fi = 0; fi < numTypes; fi++)
for (int n = 0; n < 2; n++)
for (int ti = 0; ti < numTopics; ti++)
typeNgramTopicCounts[fi][n][ti] = in.readInt();
unitypeTopicCounts = readIntArray2 (in);
bitypeTopicCounts = readIntArray2 (in);
tokensPerTopic = new int[numTopics];
for (int ti = 0; ti < numTopics; ti++)
tokensPerTopic[ti] = in.readInt();
bitokensPerTopic = readIntArray2 (in);
}
	// Just for testing. The recommended entry point is mallet/bin/vectors2topics.
public static void main (String[] args)
{
InstanceList ilist = InstanceList.load (new File(args[0]));
int numIterations = args.length > 1 ? Integer.parseInt(args[1]) : 1000;
int numTopWords = args.length > 2 ? Integer.parseInt(args[2]) : 20;
System.out.println ("Data loaded.");
TopicalNGrams tng = new TopicalNGrams (10);
		tng.estimate (ilist, numIterations, 1, 0, null, new Randoms());
		tng.printTopWords (numTopWords, true);
}
}
| 22,348 | 36.943973 | 134 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/SimpleLDA.java | /* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.topics;
import java.util.*;
import java.util.logging.*;
import java.util.zip.*;
import java.io.*;
import java.text.NumberFormat;
import cc.mallet.topics.*;
import cc.mallet.types.*;
import cc.mallet.util.*;
/**
* A simple implementation of Latent Dirichlet Allocation using Gibbs sampling.
* This code is slower than the regular Mallet LDA implementation, but provides a
* better starting place for understanding how sampling works and for
* building new topic models.
*
* @author David Mimno, Andrew McCallum
*/
public class SimpleLDA implements Serializable {
private static Logger logger = MalletLogger.getLogger(SimpleLDA.class.getName());
// the training instances and their topic assignments
protected ArrayList<TopicAssignment> data;
// the alphabet for the input data
protected Alphabet alphabet;
// the alphabet for the topics
protected LabelAlphabet topicAlphabet;
// The number of topics requested
protected int numTopics;
// The size of the vocabulary
protected int numTypes;
// Prior parameters
protected double alpha; // Dirichlet(alpha,alpha,...) is the distribution over topics
protected double alphaSum;
protected double beta; // Prior on per-topic multinomial distribution over words
protected double betaSum;
public static final double DEFAULT_BETA = 0.01;
// An array to put the topic counts for the current document.
// Initialized locally below. Defined here to avoid
// garbage collection overhead.
protected int[] oneDocTopicCounts; // indexed by <document index, topic index>
// Statistics needed for sampling.
protected int[][] typeTopicCounts; // indexed by <feature index, topic index>
protected int[] tokensPerTopic; // indexed by <topic index>
public int showTopicsInterval = 50;
public int wordsPerTopic = 10;
protected Randoms random;
protected NumberFormat formatter;
protected boolean printLogLikelihood = false;
public SimpleLDA (int numberOfTopics) {
this (numberOfTopics, numberOfTopics, DEFAULT_BETA);
}
public SimpleLDA (int numberOfTopics, double alphaSum, double beta) {
this (numberOfTopics, alphaSum, beta, new Randoms());
}
private static LabelAlphabet newLabelAlphabet (int numTopics) {
LabelAlphabet ret = new LabelAlphabet();
for (int i = 0; i < numTopics; i++)
ret.lookupIndex("topic"+i);
return ret;
}
public SimpleLDA (int numberOfTopics, double alphaSum, double beta, Randoms random) {
this (newLabelAlphabet (numberOfTopics), alphaSum, beta, random);
}
public SimpleLDA (LabelAlphabet topicAlphabet, double alphaSum, double beta, Randoms random)
{
this.data = new ArrayList<TopicAssignment>();
this.topicAlphabet = topicAlphabet;
this.numTopics = topicAlphabet.size();
this.alphaSum = alphaSum;
this.alpha = alphaSum / numTopics;
this.beta = beta;
this.random = random;
oneDocTopicCounts = new int[numTopics];
tokensPerTopic = new int[numTopics];
formatter = NumberFormat.getInstance();
formatter.setMaximumFractionDigits(5);
logger.info("Simple LDA: " + numTopics + " topics");
}
public Alphabet getAlphabet() { return alphabet; }
public LabelAlphabet getTopicAlphabet() { return topicAlphabet; }
public int getNumTopics() { return numTopics; }
public ArrayList<TopicAssignment> getData() { return data; }
public void setTopicDisplay(int interval, int n) {
this.showTopicsInterval = interval;
this.wordsPerTopic = n;
}
public void setRandomSeed(int seed) {
random = new Randoms(seed);
}
public int[][] getTypeTopicCounts() { return typeTopicCounts; }
public int[] getTopicTotals() { return tokensPerTopic; }
public void addInstances (InstanceList training) {
alphabet = training.getDataAlphabet();
numTypes = alphabet.size();
betaSum = beta * numTypes;
typeTopicCounts = new int[numTypes][numTopics];
int doc = 0;
for (Instance instance : training) {
doc++;
FeatureSequence tokens = (FeatureSequence) instance.getData();
LabelSequence topicSequence =
new LabelSequence(topicAlphabet, new int[ tokens.size() ]);
int[] topics = topicSequence.getFeatures();
for (int position = 0; position < tokens.size(); position++) {
int topic = random.nextInt(numTopics);
topics[position] = topic;
tokensPerTopic[topic]++;
int type = tokens.getIndexAtPosition(position);
typeTopicCounts[type][topic]++;
}
TopicAssignment t = new TopicAssignment (instance, topicSequence);
data.add (t);
}
}
public void sample (int iterations) throws IOException {
for (int iteration = 1; iteration <= iterations; iteration++) {
long iterationStart = System.currentTimeMillis();
// Loop over every document in the corpus
for (int doc = 0; doc < data.size(); doc++) {
FeatureSequence tokenSequence =
(FeatureSequence) data.get(doc).instance.getData();
LabelSequence topicSequence =
(LabelSequence) data.get(doc).topicSequence;
sampleTopicsForOneDoc (tokenSequence, topicSequence);
}
long elapsedMillis = System.currentTimeMillis() - iterationStart;
logger.fine(iteration + "\t" + elapsedMillis + "ms\t");
// Occasionally print more information
if (showTopicsInterval != 0 && iteration % showTopicsInterval == 0) {
logger.info("<" + iteration + "> Log Likelihood: " + modelLogLikelihood() + "\n" +
topWords (wordsPerTopic));
}
}
}
protected void sampleTopicsForOneDoc (FeatureSequence tokenSequence,
FeatureSequence topicSequence) {
int[] oneDocTopics = topicSequence.getFeatures();
int[] currentTypeTopicCounts;
int type, oldTopic, newTopic;
double topicWeightsSum;
int docLength = tokenSequence.getLength();
int[] localTopicCounts = new int[numTopics];
// populate topic counts
for (int position = 0; position < docLength; position++) {
localTopicCounts[oneDocTopics[position]]++;
}
double score, sum;
double[] topicTermScores = new double[numTopics];
// Iterate over the positions (words) in the document
for (int position = 0; position < docLength; position++) {
type = tokenSequence.getIndexAtPosition(position);
oldTopic = oneDocTopics[position];
// Grab the relevant row from our two-dimensional array
currentTypeTopicCounts = typeTopicCounts[type];
// Remove this token from all counts.
localTopicCounts[oldTopic]--;
tokensPerTopic[oldTopic]--;
assert(tokensPerTopic[oldTopic] >= 0) : "old Topic " + oldTopic + " below 0";
currentTypeTopicCounts[oldTopic]--;
// Now calculate and add up the scores for each topic for this word
sum = 0.0;
// Here's where the math happens! Note that overall performance is
// dominated by what you do in this loop.
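			// This is the collapsed Gibbs update for LDA:
			//   p(topic = t | w, d)  proportional to
			//     (alpha + n_{t|d}) * (beta + n_{w|t}) / (betaSum + n_t)
			// where n_{t|d} is the count of topic t in this document, n_{w|t}
			// the count of word w in topic t, and n_t the total tokens in t.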
for (int topic = 0; topic < numTopics; topic++) {
score =
(alpha + localTopicCounts[topic]) *
((beta + currentTypeTopicCounts[topic]) /
(betaSum + tokensPerTopic[topic]));
sum += score;
topicTermScores[topic] = score;
}
// Choose a random point between 0 and the sum of all topic scores
double sample = random.nextUniform() * sum;
// Figure out which topic contains that point
newTopic = -1;
while (sample > 0.0) {
newTopic++;
sample -= topicTermScores[newTopic];
}
// Make sure we actually sampled a topic
if (newTopic == -1) {
throw new IllegalStateException ("SimpleLDA: New topic not sampled.");
}
// Put that new topic into the counts
oneDocTopics[position] = newTopic;
localTopicCounts[newTopic]++;
tokensPerTopic[newTopic]++;
currentTypeTopicCounts[newTopic]++;
}
}
public double modelLogLikelihood() {
double logLikelihood = 0.0;
int nonZeroTopics;
// The likelihood of the model is a combination of a
// Dirichlet-multinomial for the words in each topic
// and a Dirichlet-multinomial for the topics in each
// document.
// The likelihood function of a dirichlet multinomial is
// Gamma( sum_i alpha_i ) prod_i Gamma( alpha_i + N_i )
// prod_i Gamma( alpha_i ) Gamma( sum_i (alpha_i + N_i) )
// So the log likelihood is
// logGamma ( sum_i alpha_i ) - logGamma ( sum_i (alpha_i + N_i) ) +
// sum_i [ logGamma( alpha_i + N_i) - logGamma( alpha_i ) ]
// Do the documents first
int[] topicCounts = new int[numTopics];
double[] topicLogGammas = new double[numTopics];
int[] docTopics;
for (int topic=0; topic < numTopics; topic++) {
topicLogGammas[ topic ] = Dirichlet.logGamma( alpha );
}
for (int doc=0; doc < data.size(); doc++) {
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequence;
docTopics = topicSequence.getFeatures();
for (int token=0; token < docTopics.length; token++) {
topicCounts[ docTopics[token] ]++;
}
for (int topic=0; topic < numTopics; topic++) {
if (topicCounts[topic] > 0) {
logLikelihood += (Dirichlet.logGamma(alpha + topicCounts[topic]) -
topicLogGammas[ topic ]);
}
}
// subtract the (count + parameter) sum term
logLikelihood -= Dirichlet.logGamma(alphaSum + docTopics.length);
Arrays.fill(topicCounts, 0);
}
// add the parameter sum term
logLikelihood += data.size() * Dirichlet.logGamma(alphaSum);
// And the topics
// Count the number of type-topic pairs
int nonZeroTypeTopics = 0;
for (int type=0; type < numTypes; type++) {
// reuse this array as a pointer
topicCounts = typeTopicCounts[type];
for (int topic = 0; topic < numTopics; topic++) {
if (topicCounts[topic] == 0) { continue; }
nonZeroTypeTopics++;
logLikelihood += Dirichlet.logGamma(beta + topicCounts[topic]);
if (Double.isNaN(logLikelihood)) {
System.out.println(topicCounts[topic]);
System.exit(1);
}
}
}
		for (int topic=0; topic < numTopics; topic++) {
			// The topic-word Dirichlet is over the full vocabulary, so its
			// concentration sum is betaSum = beta * numTypes.
			logLikelihood -= 
				Dirichlet.logGamma( betaSum +
											tokensPerTopic[ topic ] );
			if (Double.isNaN(logLikelihood)) {
				System.out.println("after topic " + topic + " " + tokensPerTopic[ topic ]);
				System.exit(1);
			}

		}
	
		// logGamma(betaSum) appears once per topic, and logGamma(beta) once
		// for each non-zero type/topic pair.
		logLikelihood += 
			(Dirichlet.logGamma(betaSum) * numTopics) -
			(Dirichlet.logGamma(beta) * nonZeroTypeTopics);
if (Double.isNaN(logLikelihood)) {
System.out.println("at the end");
System.exit(1);
}
return logLikelihood;
}
//
// Methods for displaying and saving results
//
public String topWords (int numWords) {
StringBuilder output = new StringBuilder();
IDSorter[] sortedWords = new IDSorter[numTypes];
for (int topic = 0; topic < numTopics; topic++) {
for (int type = 0; type < numTypes; type++) {
sortedWords[type] = new IDSorter(type, typeTopicCounts[type][topic]);
}
Arrays.sort(sortedWords);
output.append(topic + "\t" + tokensPerTopic[topic] + "\t");
for (int i=0; i < numWords; i++) {
output.append(alphabet.lookupObject(sortedWords[i].getID()) + " ");
}
output.append("\n");
}
return output.toString();
}
/**
* @param file The filename to print to
* @param threshold Only print topics with proportion greater than this number
* @param max Print no more than this many topics
*/
public void printDocumentTopics (File file, double threshold, int max) throws IOException {
PrintWriter out = new PrintWriter(file);
out.print ("#doc source topic proportion ...\n");
int docLen;
int[] topicCounts = new int[ numTopics ];
IDSorter[] sortedTopics = new IDSorter[ numTopics ];
for (int topic = 0; topic < numTopics; topic++) {
// Initialize the sorters with dummy values
sortedTopics[topic] = new IDSorter(topic, topic);
}
if (max < 0 || max > numTopics) {
max = numTopics;
}
for (int doc = 0; doc < data.size(); doc++) {
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequence;
int[] currentDocTopics = topicSequence.getFeatures();
out.print (doc); out.print (' ');
if (data.get(doc).instance.getSource() != null) {
out.print (data.get(doc).instance.getSource());
}
else {
out.print ("null-source");
}
out.print (' ');
docLen = currentDocTopics.length;
// Count up the tokens
for (int token=0; token < docLen; token++) {
topicCounts[ currentDocTopics[token] ]++;
}
// And normalize
for (int topic = 0; topic < numTopics; topic++) {
sortedTopics[topic].set(topic, (float) topicCounts[topic] / docLen);
}
Arrays.sort(sortedTopics);
for (int i = 0; i < max; i++) {
if (sortedTopics[i].getWeight() < threshold) { break; }
out.print (sortedTopics[i].getID() + " " +
sortedTopics[i].getWeight() + " ");
}
out.print (" \n");
Arrays.fill(topicCounts, 0);
}
}
public void printState (File f) throws IOException {
PrintStream out =
new PrintStream(new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(f))));
printState(out);
out.close();
}
public void printState (PrintStream out) {
out.println ("#doc source pos typeindex type topic");
for (int doc = 0; doc < data.size(); doc++) {
FeatureSequence tokenSequence = (FeatureSequence) data.get(doc).instance.getData();
LabelSequence topicSequence = (LabelSequence) data.get(doc).topicSequence;
String source = "NA";
if (data.get(doc).instance.getSource() != null) {
source = data.get(doc).instance.getSource().toString();
}
for (int position = 0; position < topicSequence.getLength(); position++) {
int type = tokenSequence.getIndexAtPosition(position);
int topic = topicSequence.getIndexAtPosition(position);
out.print(doc); out.print(' ');
out.print(source); out.print(' ');
out.print(position); out.print(' ');
out.print(type); out.print(' ');
out.print(alphabet.lookupObject(type)); out.print(' ');
out.print(topic); out.println();
}
}
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
public void write (File f) {
try {
ObjectOutputStream oos = new ObjectOutputStream (new FileOutputStream(f));
oos.writeObject(this);
oos.close();
}
catch (IOException e) {
System.err.println("Exception writing file " + f + ": " + e);
}
}
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
// Instance lists
out.writeObject (data);
out.writeObject (alphabet);
out.writeObject (topicAlphabet);
out.writeInt (numTopics);
		out.writeDouble (alpha);
out.writeDouble (beta);
out.writeDouble (betaSum);
out.writeInt(showTopicsInterval);
out.writeInt(wordsPerTopic);
out.writeObject(random);
out.writeObject(formatter);
out.writeBoolean(printLogLikelihood);
out.writeObject (typeTopicCounts);
for (int ti = 0; ti < numTopics; ti++) {
out.writeInt (tokensPerTopic[ti]);
}
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int featuresLength;
int version = in.readInt ();
data = (ArrayList<TopicAssignment>) in.readObject ();
alphabet = (Alphabet) in.readObject();
topicAlphabet = (LabelAlphabet) in.readObject();
numTopics = in.readInt();
alpha = in.readDouble();
alphaSum = alpha * numTopics;
beta = in.readDouble();
betaSum = in.readDouble();
showTopicsInterval = in.readInt();
wordsPerTopic = in.readInt();
random = (Randoms) in.readObject();
formatter = (NumberFormat) in.readObject();
printLogLikelihood = in.readBoolean();
int numDocs = data.size();
this.numTypes = alphabet.size();
typeTopicCounts = (int[][]) in.readObject();
tokensPerTopic = new int[numTopics];
for (int ti = 0; ti < numTopics; ti++) {
tokensPerTopic[ti] = in.readInt();
}
}
public static void main (String[] args) throws IOException {
InstanceList training = InstanceList.load (new File(args[0]));
int numTopics = args.length > 1 ? Integer.parseInt(args[1]) : 200;
SimpleLDA lda = new SimpleLDA (numTopics, 50.0, 0.01);
lda.addInstances(training);
lda.sample(1000);
}
}
| 16,480 | 27.762653 | 93 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/MultinomialHMM.java | /* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.topics;
import cc.mallet.types.*;
import cc.mallet.util.Randoms;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
import java.util.zip.*;
import java.io.*;
import java.text.NumberFormat;
import gnu.trove.*;
/**
* A hidden Markov model over sequences of documents, where each hidden state
* emits a multinomial distribution over the topics of a previously trained
* LDA model (loaded from its Gibbs state file).
* @author David Mimno, Andrew McCallum
*/
public class MultinomialHMM {
int numTopics; // Number of topics to be fit
int numStates; // Number of hidden states
int numDocs;
int numSequences;
// Dirichlet(alpha,alpha,...) is the distribution over topics
double[] alpha;
double alphaSum;
// Prior on per-topic multinomial distribution over words
double beta;
double betaSum;
// Prior on the state-state transition distributions
double gamma;
double gammaSum;
double pi;
double sumPi;
TIntObjectHashMap<TIntIntHashMap> documentTopics;
int[] documentSequenceIDs;
int[] documentStates;
int[][] stateTopicCounts;
int[] stateTopicTotals;
int[][] stateStateTransitions;
int[] stateTransitionTotals;
int[] initialStateCounts;
// Keep track of the most times each topic is
// used in any document
int[] maxTokensPerTopic;
// The size of the largest document
int maxDocLength;
// Rather than calculating log gammas for every state and every topic
// we cache log predictive distributions for every possible state
// and document.
double[][][] topicLogGammaCache;
double[][] docLogGammaCache;
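// Concretely, topicLogGammaCache[state][topic][n] caches
// sum_{i=1..n} log(alpha[topic] + N_{state,topic} + i - 1), and docLogGammaCache[state][len]
// caches the matching sum over log(alphaSum + N_state + i - 1), so the per-document state
// log-likelihood computed in sampleState() reduces to table lookups
// (see recacheStateTopicDistribution below).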
int numIterations = 1000;
int burninPeriod = 200;
int saveSampleInterval = 10;
int optimizeInterval = 0;
int showTopicsInterval = 50;
String[] topicKeys;
Randoms random;
NumberFormat formatter;
public MultinomialHMM (int numberOfTopics, String topicsFilename, int numStates) throws IOException {
formatter = NumberFormat.getInstance();
formatter.setMaximumFractionDigits(5);
System.out.println("LDA HMM: " + numberOfTopics);
documentTopics = new TIntObjectHashMap<TIntIntHashMap>();
this.numTopics = numberOfTopics;
this.alphaSum = numberOfTopics;
this.alpha = new double[numberOfTopics];
Arrays.fill(alpha, alphaSum / numTopics);
topicKeys = new String[numTopics];
// This initializes numDocs as well
loadTopicsFromFile(topicsFilename);
documentStates = new int[ numDocs ];
documentSequenceIDs = new int[ numDocs ];
maxTokensPerTopic = new int[ numTopics ];
maxDocLength = 0;
//int[] histogram = new int[380];
//int totalTokens = 0;
for (int doc=0; doc < numDocs; doc++) {
if (! documentTopics.containsKey(doc)) { continue; }
TIntIntHashMap topicCounts = documentTopics.get(doc);
int count = 0;
for (int topic: topicCounts.keys()) {
int topicCount = topicCounts.get(topic);
//histogram[topicCount]++;
//totalTokens += topicCount;
if (topicCount > maxTokensPerTopic[topic]) {
maxTokensPerTopic[topic] = topicCount;
}
count += topicCount;
}
if (count > maxDocLength) {
maxDocLength = count;
}
}
/*
double runningTotal = 0.0;
for (int i=337; i >= 0; i--) {
runningTotal += i * histogram[i];
System.out.format("%d\t%d\t%.3f\n", i, histogram[i],
runningTotal / totalTokens);
}
*/
this.numStates = numStates;
this.initialStateCounts = new int[numStates];
topicLogGammaCache = new double[numStates][numTopics][];
for (int state=0; state < numStates; state++) {
for (int topic=0; topic < numTopics; topic++) {
topicLogGammaCache[state][topic] = new double[ maxTokensPerTopic[topic] + 1 ];
//topicLogGammaCache[state][topic] = new double[21];
}
}
System.out.println( maxDocLength );
docLogGammaCache = new double[numStates][ maxDocLength + 1 ];
}
public void setGamma(double g) {
this.gamma = g;
}
public void setNumIterations (int numIterations) {
this.numIterations = numIterations;
}
public void setBurninPeriod (int burninPeriod) {
this.burninPeriod = burninPeriod;
}
public void setTopicDisplayInterval(int interval) {
this.showTopicsInterval = interval;
}
public void setRandomSeed(int seed) {
random = new Randoms(seed);
}
public void setOptimizeInterval(int interval) {
this.optimizeInterval = interval;
}
public void initialize () {
if (random == null) {
random = new Randoms();
}
gammaSum = gamma * numStates;
stateTopicCounts = new int[numStates][numTopics];
stateTopicTotals = new int[numStates];
stateStateTransitions = new int[numStates][numStates];
stateTransitionTotals = new int[numStates];
pi = 1000.0;
sumPi = numStates * pi;
numSequences = 0;
// The code to cache topic distributions
// takes an int-int hashmap as a mask to only update
// the distributions for topics that have actually changed.
// Here we create a dummy count hash that has all the topics.
TIntIntHashMap allTopicsDummy = new TIntIntHashMap();
for (int topic = 0; topic < numTopics; topic++) {
allTopicsDummy.put(topic, 1);
}
for (int state=0; state < numStates; state++) {
recacheStateTopicDistribution(state, allTopicsDummy);
}
for (int doc = 0; doc < numDocs; doc++) {
sampleState(doc, random, true);
}
}
private void recacheStateTopicDistribution(int state, TIntIntHashMap topicCounts) {
int[] currentStateTopicCounts = stateTopicCounts[state];
double[][] currentStateCache = topicLogGammaCache[state];
double[] cache;
for (int topic: topicCounts.keys()) {
cache = currentStateCache[topic];
cache[0] = 0.0;
for (int i=1; i < cache.length; i++) {
cache[i] =
cache[ i-1 ] +
Math.log( alpha[topic] + i - 1 +
currentStateTopicCounts[topic] );
}
}
docLogGammaCache[state][0] = 0.0;
for (int i=1; i < docLogGammaCache[state].length; i++) {
docLogGammaCache[state][i] =
docLogGammaCache[state][ i-1 ] +
Math.log( alphaSum + i - 1 +
stateTopicTotals[state] );
}
}
public void sample() throws IOException {
long startTime = System.currentTimeMillis();
for (int iterations = 1; iterations <= numIterations; iterations++) {
long iterationStart = System.currentTimeMillis();
//System.out.println (printStateTransitions());
for (int doc = 0; doc < numDocs; doc++) {
sampleState (doc, random, false);
//if (doc % 10000 == 0) { System.out.println (printStateTransitions()); }
}
System.out.print((System.currentTimeMillis() - iterationStart) + " ");
if (iterations % 10 == 0) {
System.out.println ("<" + iterations + "> ");
PrintWriter out =
new PrintWriter(new BufferedWriter(new FileWriter("state_state_matrix." + iterations)));
out.print(stateTransitionMatrix());
out.close();
out = new PrintWriter(new BufferedWriter(new FileWriter("state_topics." + iterations)));
out.print(stateTopics());
out.close();
if (iterations % 10 == 0) {
out = new PrintWriter(new BufferedWriter(new FileWriter("states." + iterations)));
for (int doc = 0; doc < documentStates.length; doc++) {
out.println(documentStates[doc]);
}
out.close();
}
}
System.out.flush();
}
long seconds = Math.round((System.currentTimeMillis() - startTime)/1000.0);
long minutes = seconds / 60; seconds %= 60;
long hours = minutes / 60; minutes %= 60;
long days = hours / 24; hours %= 24;
System.out.print ("\nTotal time: ");
if (days != 0) { System.out.print(days); System.out.print(" days "); }
if (hours != 0) { System.out.print(hours); System.out.print(" hours "); }
if (minutes != 0) { System.out.print(minutes); System.out.print(" minutes "); }
System.out.print(seconds); System.out.println(" seconds");
}
public void loadTopicsFromFile(String stateFilename) throws IOException {
BufferedReader in;
if (stateFilename.endsWith(".gz")) {
in = new BufferedReader(new InputStreamReader(new GZIPInputStream(new FileInputStream(stateFilename))));
}
else {
in = new BufferedReader(new FileReader(new File(stateFilename)));
}
numDocs = 0;
String line = null;
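// Each non-comment line is a whitespace-separated Gibbs state record: field 0 is the
// document index and field 4 the topic assignment; fields 1 and 2 (token position and
// type index) are parsed but not otherwise used here.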
while ((line = in.readLine()) != null) {
if (line.startsWith("#")) {
continue;
}
String[] fields = line.split(" ");
int doc = Integer.parseInt(fields[0]);
int token = Integer.parseInt(fields[1]);
int type = Integer.parseInt(fields[2]);
int topic = Integer.parseInt(fields[4]);
// Now add the new topic
if (! documentTopics.containsKey(doc)) {
documentTopics.put(doc, new TIntIntHashMap());
}
if (documentTopics.get(doc).containsKey(topic)) {
documentTopics.get(doc).increment(topic);
}
else {
documentTopics.get(doc).put(topic, 1);
}
if (doc >= numDocs) { numDocs = doc + 1; }
}
in.close();
System.out.println("loaded topics, " + numDocs + " documents");
}
public void loadAlphaFromFile(String alphaFilename) throws IOException {
// Now restore the saved alpha parameters
alphaSum = 0.0;
BufferedReader in = new BufferedReader(new FileReader(new File(alphaFilename)));
String line = null;
while ((line = in.readLine()) != null) {
if (line.equals("")) { continue; }
String[] fields = line.split("\\s+");
int topic = Integer.parseInt(fields[0]);
alpha[topic] = 1.0; // Double.parseDouble(fields[1]);
alphaSum += alpha[topic];
StringBuffer topicKey = new StringBuffer();
for (int i=2; i<fields.length; i++) {
topicKey.append(fields[i] + " ");
}
topicKeys[topic] = topicKey.toString();
}
in.close();
System.out.println("loaded alpha");
}
/*
public void loadStatesFromFile(String stateFilename) throws IOException {
int doc = 0;
int state;
BufferedReader in = new BufferedReader(new FileReader(new File(stateFilename)));
String line = null;
while ((line = in.readLine()) != null) {
// We assume that the sequences are in the instance list
// in order.
state = Integer.parseInt(line);
documentStates[doc] = state;
// Additional bookkeeping will be performed when we load sequence IDs,
// so states MUST be loaded before sequences.
doc++;
}
in.close();
System.out.println("loaded states");
}
*/
public void loadSequenceIDsFromFile(String sequenceFilename) throws IOException {
int doc = 0;
int sequenceID;
int currentSequenceID = -1;
BufferedReader in = new BufferedReader(new FileReader(new File(sequenceFilename)));
String line = null;
while ((line = in.readLine()) != null) {
// We assume that the sequences are in the instance list
// in order.
String[] fields = line.split("\\t");
sequenceID = Integer.parseInt(fields[0]);
documentSequenceIDs[doc] = sequenceID;
if (sequenceID != currentSequenceID) {
numSequences ++;
}
currentSequenceID = sequenceID;
doc++;
}
in.close();
if (doc != numDocs) { System.out.println("Warning: number of documents with topics (" + numDocs + ") is not equal to number of docs with sequence IDs (" + doc + ")"); }
System.out.println("loaded sequence");
}
private void sampleState (int doc, Randoms r, boolean initializing) {
/*
if (doc % 10000 == 0) {
if (initializing) {
System.out.println("initializing doc " + doc);
}
else {
System.out.println("sampling doc " + doc);
}
}
*/
long startTime = System.currentTimeMillis();
// It's possible this document contains no words,
// in which case it has no topics, and no entry in the
// documentTopics hash.
if (! documentTopics.containsKey(doc)) { return; }
TIntIntHashMap topicCounts = documentTopics.get(doc);
// if we are in initializing mode, this is meaningless,
// but it won't hurt.
int oldState = documentStates[doc];
int[] currentStateTopicCounts = stateTopicCounts[oldState];
// Look at the document features (topics).
// If we're not in initializing mode, reduce the topic counts
// of the current (old) state.
int docLength = 0;
for (int topic: topicCounts.keys()) {
int topicCount = topicCounts.get(topic);
if (! initializing) {
currentStateTopicCounts[topic] -= topicCount;
}
docLength += topicCount;
}
if (! initializing) {
stateTopicTotals[oldState] -= docLength;
recacheStateTopicDistribution(oldState, topicCounts);
}
int previousSequenceID = -1;
if (doc > 0) {
previousSequenceID = documentSequenceIDs[ doc-1 ];
}
int sequenceID = documentSequenceIDs[ doc ];
int nextSequenceID = -1;
if (! initializing &&
doc < numDocs - 1) {
nextSequenceID = documentSequenceIDs[ doc+1 ];
}
double[] stateLogLikelihoods = new double[numStates];
double[] samplingDistribution = new double[numStates];
int nextState, previousState;
if (initializing) {
// Initializing the states is the same as sampling them,
// but we only look at the previous state and we don't decrement
// any counts.
if (previousSequenceID != sequenceID) {
// New sequence, start from scratch
for (int state = 0; state < numStates; state++) {
stateLogLikelihoods[state] = Math.log( (initialStateCounts[state] + pi) /
(numSequences - 1 + sumPi) );
}
}
else {
// Continuation
previousState = documentStates[ doc-1 ];
for (int state = 0; state < numStates; state++) {
stateLogLikelihoods[state] = Math.log( stateStateTransitions[previousState][state] + gamma );
if (Double.isInfinite(stateLogLikelihoods[state])) {
System.out.println("infinite end");
}
}
}
}
else {
// There are four cases:
if (previousSequenceID != sequenceID && sequenceID != nextSequenceID) {
// 1. This is a singleton document
initialStateCounts[oldState]--;
for (int state = 0; state < numStates; state++) {
stateLogLikelihoods[state] = Math.log( (initialStateCounts[state] + pi) /
(numSequences - 1 + sumPi) );
}
}
else if (previousSequenceID != sequenceID) {
// 2. This is the beginning of a sequence
initialStateCounts[oldState]--;
nextState = documentStates[doc+1];
stateStateTransitions[oldState][nextState]--;
assert(stateStateTransitions[oldState][nextState] >= 0);
stateTransitionTotals[oldState]--;
for (int state = 0; state < numStates; state++) {
stateLogLikelihoods[state] = Math.log( (stateStateTransitions[state][nextState] + gamma) *
(initialStateCounts[state] + pi) /
(numSequences - 1 + sumPi) );
if (Double.isInfinite(stateLogLikelihoods[state])) {
System.out.println("infinite beginning");
}
}
}
else if (sequenceID != nextSequenceID) {
// 3. This is the end of a sequence
previousState = documentStates[doc-1];
stateStateTransitions[previousState][oldState]--;
assert(stateStateTransitions[previousState][oldState] >= 0);
for (int state = 0; state < numStates; state++) {
stateLogLikelihoods[state] = Math.log( stateStateTransitions[previousState][state] + gamma );
if (Double.isInfinite(stateLogLikelihoods[state])) {
System.out.println("infinite end");
}
}
}
else {
// 4. This is the middle of a sequence
nextState = documentStates[doc+1];
stateStateTransitions[oldState][nextState]--;
if (stateStateTransitions[oldState][nextState] < 0) {
System.out.println(printStateTransitions());
System.out.println(oldState + " -> " + nextState);
System.out.println(sequenceID);
}
assert (stateStateTransitions[oldState][nextState] >= 0);
stateTransitionTotals[oldState]--;
previousState = documentStates[doc-1];
stateStateTransitions[previousState][oldState]--;
assert(stateStateTransitions[previousState][oldState] >= 0);
for (int state = 0; state < numStates; state++) {
if (previousState == state && state == nextState) {
stateLogLikelihoods[state] =
Math.log( (stateStateTransitions[previousState][state] + gamma) *
(stateStateTransitions[state][nextState] + 1 + gamma) /
(stateTransitionTotals[state] + 1 + gammaSum) );
}
else if (previousState == state) {
stateLogLikelihoods[state] =
Math.log( (stateStateTransitions[previousState][state] + gamma) *
(stateStateTransitions[state][nextState] + gamma) /
(stateTransitionTotals[state] + 1 + gammaSum) );
}
else {
stateLogLikelihoods[state] =
Math.log( (stateStateTransitions[previousState][state] + gamma) *
(stateStateTransitions[state][nextState] + gamma) /
(stateTransitionTotals[state] + gammaSum) );
}
if (Double.isInfinite(stateLogLikelihoods[state])) {
System.out.println("infinite middle: " + doc);
System.out.println(previousState + " -> " +
state + " -> " + nextState);
System.out.println(stateStateTransitions[previousState][state] + " -> " +
stateStateTransitions[state][nextState] + " / " +
stateTransitionTotals[state]);
}
}
}
}
double max = Double.NEGATIVE_INFINITY;
for (int state = 0; state < numStates; state++) {
stateLogLikelihoods[state] -= stateTransitionTotals[state] / 10;
currentStateTopicCounts = stateTopicCounts[state];
double[][] currentStateLogGammaCache = topicLogGammaCache[state];
int totalTokens = 0;
for (int topic: topicCounts.keys()) {
int count = topicCounts.get(topic);
// Cached Sampling Distribution
stateLogLikelihoods[state] += currentStateLogGammaCache[topic][count];
/*
// Hybrid version
if (count < currentStateLogGammaCache[topic].length) {
stateLogLikelihoods[state] += currentStateLogGammaCache[topic][count];
}
else {
int i = currentStateLogGammaCache[topic].length - 1;
stateLogLikelihoods[state] +=
currentStateLogGammaCache[topic][ i ];
for (; i < count; i++) {
stateLogLikelihoods[state] +=
Math.log(alpha[topic] + currentStateTopicCounts[topic] + i);
}
}
*/
/*
for (int j=0; j < count; j++) {
stateLogLikelihoods[state] +=
Math.log( (alpha[topic] + currentStateTopicCounts[topic] + j) /
(alphaSum + stateTopicTotals[state] + totalTokens) );
if (Double.isNaN(stateLogLikelihoods[state])) {
System.out.println("NaN: " + alpha[topic] + " + " +
currentStateTopicCounts[topic] + " + " +
j + ") /\n" +
"(" + alphaSum + " + " +
stateTopicTotals[state] + " + " + totalTokens);
}
totalTokens++;
}
*/
}
// Cached Sampling Distribution
stateLogLikelihoods[state] -= docLogGammaCache[state][ docLength ];
/*
// Hybrid version
if (docLength < docLogGammaCache[state].length) {
stateLogLikelihoods[state] -= docLogGammaCache[state][docLength];
}
else {
int i = docLogGammaCache[state].length - 1;
stateLogLikelihoods[state] -=
docLogGammaCache[state][ i ];
for (; i < docLength; i++) {
stateLogLikelihoods[state] -=
Math.log(alphaSum + stateTopicTotals[state] + i);
}
}
*/
if (stateLogLikelihoods[state] > max) {
max = stateLogLikelihoods[state];
}
}
double sum = 0.0;
for (int state = 0; state < numStates; state++) {
if (Double.isNaN(samplingDistribution[state])) {
System.out.println(stateLogLikelihoods[state]);
}
assert(! Double.isNaN(samplingDistribution[state]));
samplingDistribution[state] =
Math.exp(stateLogLikelihoods[state] - max);
sum += samplingDistribution[state];
if (Double.isNaN(samplingDistribution[state])) {
System.out.println(stateLogLikelihoods[state]);
}
assert(! Double.isNaN(samplingDistribution[state]));
if (doc % 100 == 0) {
//System.out.println(samplingDistribution[state]);
}
}
int newState = r.nextDiscrete(samplingDistribution, sum);
documentStates[doc] = newState;
for (int topic = 0; topic < numTopics; topic++) {
stateTopicCounts[newState][topic] += topicCounts.get(topic);
}
stateTopicTotals[newState] += docLength;
recacheStateTopicDistribution(newState, topicCounts);
if (initializing) {
// If we're initializing the states, don't bother
// looking at the next state.
if (previousSequenceID != sequenceID) {
initialStateCounts[newState]++;
}
else {
previousState = documentStates[doc-1];
stateStateTransitions[previousState][newState]++;
stateTransitionTotals[newState]++;
}
}
else {
if (previousSequenceID != sequenceID && sequenceID != nextSequenceID) {
// 1. This is a singleton document
initialStateCounts[newState]++;
}
else if (previousSequenceID != sequenceID) {
// 2. This is the beginning of a sequence
initialStateCounts[newState]++;
nextState = documentStates[doc+1];
stateStateTransitions[newState][nextState]++;
stateTransitionTotals[newState]++;
}
else if (sequenceID != nextSequenceID) {
// 3. This is the end of a sequence
previousState = documentStates[doc-1];
stateStateTransitions[previousState][newState]++;
}
else {
// 4. This is the middle of a sequence
previousState = documentStates[doc-1];
stateStateTransitions[previousState][newState]++;
nextState = documentStates[doc+1];
stateStateTransitions[newState][nextState]++;
stateTransitionTotals[newState]++;
}
}
}
public String printStateTransitions() {
StringBuffer out = new StringBuffer();
IDSorter[] sortedTopics = new IDSorter[numTopics];
for (int s = 0; s < numStates; s++) {
for (int topic=0; topic<numTopics; topic++) {
sortedTopics[topic] = new IDSorter(topic, (double) stateTopicCounts[s][topic] / stateTopicTotals[s]);
}
Arrays.sort(sortedTopics);
out.append("\n" + s + "\n");
for (int i=0; i<4; i++) {
int topic = sortedTopics[i].getID();
out.append(stateTopicCounts[s][topic] + "\t" + topicKeys[topic] + "\n");
}
out.append("\n");
out.append("[" + initialStateCounts[s] + "/" + numSequences + "] ");
out.append("[" + stateTransitionTotals[s] + "]");
for (int t = 0; t < numStates; t++) {
out.append("\t");
if (s == t) {
out.append("[" + stateStateTransitions[s][t] + "]");
}
else {
out.append(stateStateTransitions[s][t]);
}
}
out.append("\n");
}
return out.toString();
}
public String stateTransitionMatrix() {
StringBuffer out = new StringBuffer();
for (int s = 0; s < numStates; s++) {
for (int t = 0; t < numStates; t++) {
out.append(stateStateTransitions[s][t]);
out.append("\t");
}
out.append("\n");
}
return out.toString();
}
public String stateTopics() {
StringBuffer out = new StringBuffer();
for (int s = 0; s < numStates; s++) {
for (int topic=0; topic<numTopics; topic++) {
out.append(stateTopicCounts[s][topic] + "\t");
}
out.append("\n");
}
return out.toString();
}
public static void main (String[] args) throws IOException {
if (args.length != 4) {
System.err.println("Usage: MultinomialHMM [num topics] [lda state file] [lda keys file] [sequence metadata file]");
System.exit(0);
}
int numTopics = Integer.parseInt(args[0]);
MultinomialHMM hmm =
new MultinomialHMM (numTopics, args[1], 150);
hmm.setGamma(1.0);
hmm.setRandomSeed(1);
hmm.loadAlphaFromFile(args[2]);
hmm.loadSequenceIDsFromFile(args[3]);
hmm.initialize();
hmm.sample();
}
}
| 24,422 | 26.349384 | 169 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/TopicInferencer.java | package cc.mallet.topics;
import cc.mallet.types.*;
import cc.mallet.util.*;
import java.util.Arrays;
import java.io.*;
public class TopicInferencer implements Serializable {
protected int numTopics;
// These values are used to encode type/topic counts as
// count/topic pairs in a single int.
protected int topicMask;
protected int topicBits;
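// Illustration (hypothetical numbers): with numTopics = 100, topicMask is 127 and
// topicBits is 7, so a packed entry e decodes as topic = (e & topicMask) and
// count = (e >> topicBits).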
protected int numTypes;
protected double[] alpha;
protected double beta;
protected double betaSum;
protected int[][] typeTopicCounts;
protected int[] tokensPerTopic;
Alphabet alphabet;
protected Randoms random = null;
double smoothingOnlyMass = 0.0;
double[] cachedCoefficients;
public TopicInferencer (int[][] typeTopicCounts, int[] tokensPerTopic, Alphabet alphabet,
double[] alpha, double beta, double betaSum) {
this.tokensPerTopic = tokensPerTopic;
this.typeTopicCounts = typeTopicCounts;
this.alphabet = alphabet;
numTopics = tokensPerTopic.length;
numTypes = typeTopicCounts.length;
if (Integer.bitCount(numTopics) == 1) {
// exact power of 2
topicMask = numTopics - 1;
topicBits = Integer.bitCount(topicMask);
}
else {
// otherwise add an extra bit
topicMask = Integer.highestOneBit(numTopics) * 2 - 1;
topicBits = Integer.bitCount(topicMask);
}
this.alpha = alpha;
this.beta = beta;
this.betaSum = betaSum;
cachedCoefficients = new double[numTopics];
for (int topic=0; topic < numTopics; topic++) {
smoothingOnlyMass += alpha[topic] * beta / (tokensPerTopic[topic] + betaSum);
cachedCoefficients[topic] = alpha[topic] / (tokensPerTopic[topic] + betaSum);
}
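// These precomputed values set up the three-part decomposition of the sampling mass
// used in getSampledDistribution(): a smoothing-only part (alpha * beta terms), a
// document-topic part (beta * n_{t|d}), and a word-topic part built from the sparse
// per-type counts, so only the non-zero pieces are recomputed per token.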
random = new Randoms();
}
public void setRandomSeed(int seed) {
random = new Randoms(seed);
}
/**
* Use Gibbs sampling to infer a topic distribution.
* Topics are initialized to the (or a) most probable topic
* for each token. Using zero iterations returns exactly this
* initial topic distribution.<p/>
* This code does not adjust type-topic counts: P(w|t) is clamped.
*/
public double[] getSampledDistribution(Instance instance, int numIterations,
int thinning, int burnIn) {
FeatureSequence tokens = (FeatureSequence) instance.getData();
int docLength = tokens.size();
int[] topics = new int[docLength];
int[] localTopicCounts = new int[numTopics];
int[] localTopicIndex = new int[numTopics];
int type;
int[] currentTypeTopicCounts;
// Initialize all positions to the most common topic
// for that type.
for (int position = 0; position < docLength; position++) {
type = tokens.getIndexAtPosition(position);
// Ignore out of vocabulary terms
if (type < numTypes && typeTopicCounts[type].length != 0) {
currentTypeTopicCounts = typeTopicCounts[type];
// This value should be a topic such that
// no other topic has more tokens of this type
// assigned to it. If for some reason there were
// no tokens of this type in the training data, it
// will default to topic 0, which is no worse than
// random initialization.
topics[position] =
currentTypeTopicCounts[0] & topicMask;
localTopicCounts[topics[position]]++;
}
}
// Build an array that densely lists the topics that
// have non-zero counts.
int denseIndex = 0;
for (int topic = 0; topic < numTopics; topic++) {
if (localTopicCounts[topic] != 0) {
localTopicIndex[denseIndex] = topic;
denseIndex++;
}
}
// Record the total number of non-zero topics
int nonZeroTopics = denseIndex;
// Initialize the topic count/beta sampling bucket
double topicBetaMass = 0.0;
// Initialize cached coefficients and the topic/beta
// normalizing constant.
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
int n = localTopicCounts[topic];
// initialize the normalization constant for the (B * n_{t|d}) term
topicBetaMass += beta * n / (tokensPerTopic[topic] + betaSum);
// update the coefficients for the non-zero topics
cachedCoefficients[topic] = (alpha[topic] + n) / (tokensPerTopic[topic] + betaSum);
}
double topicTermMass = 0.0;
double[] topicTermScores = new double[numTopics];
int[] topicTermIndices;
int[] topicTermValues;
int i;
double score;
int oldTopic, newTopic;
double[] result = new double[numTopics];
double sum = 0.0;
for (int iteration = 1; iteration <= numIterations; iteration++) {
// Iterate over the positions (words) in the document
for (int position = 0; position < docLength; position++) {
type = tokens.getIndexAtPosition(position);
// ignore out-of-vocabulary terms
if (type >= numTypes || typeTopicCounts[type].length == 0) { continue; }
oldTopic = topics[position];
currentTypeTopicCounts = typeTopicCounts[type];
// Prepare to sample by adjusting existing counts.
// Note that we do not need to change the smoothing-only
// mass since the denominator is clamped.
topicBetaMass -= beta * localTopicCounts[oldTopic] /
(tokensPerTopic[oldTopic] + betaSum);
// Decrement the local doc/topic counts
localTopicCounts[oldTopic]--;
//assert(localTopicCounts[oldTopic] >= 0);
// Maintain the dense index, if we are deleting
// the old topic
if (localTopicCounts[oldTopic] == 0) {
// First get to the dense location associated with
// the old topic.
denseIndex = 0;
// We know it's in there somewhere, so we don't
// need bounds checking.
while (localTopicIndex[denseIndex] != oldTopic) {
denseIndex++;
}
// shift all remaining dense indices to the left.
while (denseIndex < nonZeroTopics) {
if (denseIndex < localTopicIndex.length - 1) {
localTopicIndex[denseIndex] =
localTopicIndex[denseIndex + 1];
}
denseIndex++;
}
nonZeroTopics --;
} // finished maintaining local topic index
topicBetaMass += beta * localTopicCounts[oldTopic] /
(tokensPerTopic[oldTopic] + betaSum);
// Reset the cached coefficient for this topic
cachedCoefficients[oldTopic] =
(alpha[oldTopic] + localTopicCounts[oldTopic]) /
(tokensPerTopic[oldTopic] + betaSum);
if (cachedCoefficients[oldTopic] <= 0) {
System.out.println("zero or less coefficient: " + oldTopic + " = (" + alpha[oldTopic] + " + " + localTopicCounts[oldTopic] + ") / ( " + tokensPerTopic[oldTopic] + " + " + betaSum + " );");
}
int index = 0;
int currentTopic, currentValue;
boolean alreadyDecremented = false;
topicTermMass = 0.0;
while (index < currentTypeTopicCounts.length &&
currentTypeTopicCounts[index] > 0) {
currentTopic = currentTypeTopicCounts[index] & topicMask;
currentValue = currentTypeTopicCounts[index] >> topicBits;
score =
cachedCoefficients[currentTopic] * currentValue;
topicTermMass += score;
topicTermScores[index] = score;
index++;
}
double sample = random.nextUniform() * (smoothingOnlyMass + topicBetaMass + topicTermMass);
double origSample = sample;
// Make sure it actually gets set
newTopic = -1;
if (sample < topicTermMass) {
//topicTermCount++;
i = -1;
while (sample > 0) {
i++;
sample -= topicTermScores[i];
}
newTopic = currentTypeTopicCounts[i] & topicMask;
}
else {
sample -= topicTermMass;
if (sample < topicBetaMass) {
//betaTopicCount++;
sample /= beta;
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
sample -= localTopicCounts[topic] /
(tokensPerTopic[topic] + betaSum);
if (sample <= 0.0) {
newTopic = topic;
break;
}
}
}
else {
sample -= topicBetaMass;
sample /= beta;
newTopic = 0;
sample -= alpha[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
while (sample > 0.0) {
newTopic++;
if (newTopic >= numTopics) {
index = 0;
while (index < currentTypeTopicCounts.length &&
currentTypeTopicCounts[index] > 0) {
currentTopic = currentTypeTopicCounts[index] & topicMask;
currentValue = currentTypeTopicCounts[index] >> topicBits;
System.out.println(currentTopic + "\t" + currentValue + "\t" + topicTermScores[index] +
"\t" + cachedCoefficients[currentTopic]);
index++;
}
}
sample -= alpha[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
}
}
}
topics[position] = newTopic;
topicBetaMass -= beta * localTopicCounts[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
localTopicCounts[newTopic]++;
// If this is a new topic for this document,
// add the topic to the dense index.
if (localTopicCounts[newTopic] == 1) {
// First find the point where we
// should insert the new topic by going to
// the end (which is the only reason we're keeping
// track of the number of non-zero
// topics) and working backwards
denseIndex = nonZeroTopics;
while (denseIndex > 0 &&
localTopicIndex[denseIndex - 1] > newTopic) {
localTopicIndex[denseIndex] =
localTopicIndex[denseIndex - 1];
denseIndex--;
}
localTopicIndex[denseIndex] = newTopic;
nonZeroTopics++;
}
// update the coefficients for the non-zero topics
cachedCoefficients[newTopic] =
(alpha[newTopic] + localTopicCounts[newTopic]) /
(tokensPerTopic[newTopic] + betaSum);
topicBetaMass += beta * localTopicCounts[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
}
if (iteration > burnIn &&
(iteration - burnIn) % thinning == 0) {
// Save a sample
for (int topic=0; topic < numTopics; topic++) {
result[topic] += alpha[topic] + localTopicCounts[topic];
sum += alpha[topic] + localTopicCounts[topic];
}
}
}
// Clean up our mess: reset the coefficients to values with only
// smoothing. The next doc will update its own non-zero topics...
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
cachedCoefficients[topic] =
alpha[topic] / (tokensPerTopic[topic] + betaSum);
}
if (sum == 0.0) {
// Save at least one sample
for (int topic=0; topic < numTopics; topic++) {
result[topic] = alpha[topic] + localTopicCounts[topic];
sum += result[topic];
}
}
// Normalize
for (int topic=0; topic < numTopics; topic++) {
result[topic] /= sum;
}
return result;
}
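// Usage sketch (illustrative; the file name and iteration counts are assumptions, and
// the instance must be encoded with the same alphabet as the trained model):
//
// TopicInferencer inferencer = TopicInferencer.read(new File("topic-inferencer.mallet"));
// double[] proportions = inferencer.getSampledDistribution(instance, 100, 10, 10);
//
// This runs 100 Gibbs iterations over one held-out document, discards the first 10 as
// burn-in, and averages every 10th sample after that into the returned distribution.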
/**
* Infer topics for the provided instances and
* write distributions to the provided file.
*
* @param instances
* @param distributionsFile
* @param numIterations The total number of iterations of sampling per document
* @param thinning The number of iterations between saved samples
* @param burnIn The number of iterations before the first saved sample
* @param threshold The minimum proportion of a given topic that will be written
* @param max The total number of topics to report per document
*/
public void writeInferredDistributions(InstanceList instances,
File distributionsFile,
int numIterations, int thinning, int burnIn,
double threshold, int max) throws IOException {
PrintWriter out = new PrintWriter(distributionsFile);
out.print ("#doc source topic proportion ...\n");
IDSorter[] sortedTopics = new IDSorter[ numTopics ];
for (int topic = 0; topic < numTopics; topic++) {
// Initialize the sorters with dummy values
sortedTopics[topic] = new IDSorter(topic, topic);
}
if (max < 0 || max > numTopics) {
max = numTopics;
}
int doc = 0;
for (Instance instance: instances) {
double[] topicDistribution =
getSampledDistribution(instance, numIterations,
thinning, burnIn);
out.print (doc); out.print (' ');
// Print the Source field of the instance
if (instance.getSource() != null) {
out.print (instance.getSource());
}
else {
out.print ("null-source");
}
out.print (' ');
for (int topic = 0; topic < numTopics; topic++) {
sortedTopics[topic].set(topic, topicDistribution[topic]);
}
Arrays.sort(sortedTopics);
for (int i = 0; i < max; i++) {
if (sortedTopics[i].getWeight() < threshold) { break; }
out.print (sortedTopics[i].getID() + " " +
sortedTopics[i].getWeight() + " ");
}
out.print (" \n");
doc++;
}
out.close();
}
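// Example output line (illustrative values): document 0 from "doc0.txt" whose topics
// above the threshold are topic 7 (proportion 0.41) and topic 2 (proportion 0.18):
//
// 0 doc0.txt 7 0.41 2 0.18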
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject(alphabet);
out.writeInt(numTopics);
out.writeInt(topicMask);
out.writeInt(topicBits);
out.writeInt(numTypes);
out.writeObject(alpha);
out.writeDouble(beta);
out.writeDouble(betaSum);
out.writeObject(typeTopicCounts);
out.writeObject(tokensPerTopic);
out.writeObject(random);
out.writeDouble(smoothingOnlyMass);
out.writeObject(cachedCoefficients);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
alphabet = (Alphabet) in.readObject();
numTopics = in.readInt();
topicMask = in.readInt();
topicBits = in.readInt();
numTypes = in.readInt();
alpha = (double[]) in.readObject();
beta = in.readDouble();
betaSum = in.readDouble();
typeTopicCounts = (int[][]) in.readObject();
tokensPerTopic = (int[]) in.readObject();
random = (Randoms) in.readObject();
smoothingOnlyMass = in.readDouble();
cachedCoefficients = (double[]) in.readObject();
}
public static TopicInferencer read (File f) throws Exception {
TopicInferencer inferencer = null;
ObjectInputStream ois = new ObjectInputStream (new FileInputStream(f));
inferencer = (TopicInferencer) ois.readObject();
ois.close();
return inferencer;
}
} | 16,948 | 31.16129 | 193 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/HierarchicalLDA.java | package cc.mallet.topics;
import java.util.ArrayList;
import java.util.Arrays;
import java.io.*;
import cc.mallet.types.*;
import cc.mallet.util.Randoms;
import gnu.trove.*;
public class HierarchicalLDA {
InstanceList instances;
InstanceList testing;
NCRPNode rootNode, node;
int numLevels;
int numDocuments;
int numTypes;
double alpha; // smoothing on topic distributions
double gamma; // "imaginary" customers at the next, as yet unused table
double eta; // smoothing on word distributions
double etaSum;
int[][] levels; // indexed < doc, token >
NCRPNode[] documentLeaves; // currently selected path (ie leaf node) through the NCRP tree
int totalNodes = 0;
String stateFile = "hlda.state";
Randoms random;
boolean showProgress = true;
int displayTopicsInterval = 50;
int numWordsToDisplay = 10;
public HierarchicalLDA () {
alpha = 10.0;
gamma = 1.0;
eta = 0.1;
}
public void setAlpha(double alpha) {
this.alpha = alpha;
}
public void setGamma(double gamma) {
this.gamma = gamma;
}
public void setEta(double eta) {
this.eta = eta;
}
public void setStateFile(String stateFile) {
this.stateFile = stateFile;
}
public void setTopicDisplay(int interval, int words) {
displayTopicsInterval = interval;
numWordsToDisplay = words;
}
/**
* This parameter determines whether the sampler shows progress
* by printing a character after every iteration.
*/
public void setProgressDisplay(boolean showProgress) {
this.showProgress = showProgress;
}
public void initialize(InstanceList instances, InstanceList testing,
int numLevels, Randoms random) {
this.instances = instances;
this.testing = testing;
this.numLevels = numLevels;
this.random = random;
if (! (instances.get(0).getData() instanceof FeatureSequence)) {
throw new IllegalArgumentException("Input must be a FeatureSequence; use the --feature-sequence option when importing data, for example.");
}
numDocuments = instances.size();
numTypes = instances.getDataAlphabet().size();
etaSum = eta * numTypes;
// Initialize a single path
NCRPNode[] path = new NCRPNode[numLevels];
rootNode = new NCRPNode(numTypes);
levels = new int[numDocuments][];
documentLeaves = new NCRPNode[numDocuments];
// Initialize and fill the topic pointer arrays for
// every document. Set everything to the single path that
// we added earlier.
for (int doc=0; doc < numDocuments; doc++) {
FeatureSequence fs = (FeatureSequence) instances.get(doc).getData();
int seqLen = fs.getLength();
path[0] = rootNode;
rootNode.customers++;
for (int level = 1; level < numLevels; level++) {
path[level] = path[level-1].select();
path[level].customers++;
}
node = path[numLevels - 1];
levels[doc] = new int[seqLen];
documentLeaves[doc] = node;
for (int token=0; token < seqLen; token++) {
int type = fs.getIndexAtPosition(token);
levels[doc][token] = random.nextInt(numLevels);
node = path[ levels[doc][token] ];
node.totalTokens++;
node.typeCounts[type]++;
}
}
}
public void estimate(int numIterations) {
for (int iteration = 1; iteration <= numIterations; iteration++) {
for (int doc=0; doc < numDocuments; doc++) {
samplePath(doc, iteration);
}
for (int doc=0; doc < numDocuments; doc++) {
sampleTopics(doc);
}
if (showProgress) {
System.out.print(".");
if (iteration % 50 == 0) {
System.out.println(" " + iteration);
}
}
if (iteration % displayTopicsInterval == 0) {
printNodes();
}
}
}
public void samplePath(int doc, int iteration) {
NCRPNode[] path = new NCRPNode[numLevels];
NCRPNode node;
int level, token, type, topicCount;
double weight;
node = documentLeaves[doc];
for (level = numLevels - 1; level >= 0; level--) {
path[level] = node;
node = node.parent;
}
documentLeaves[doc].dropPath();
TObjectDoubleHashMap<NCRPNode> nodeWeights =
new TObjectDoubleHashMap<NCRPNode>();
// Calculate p(c_m | c_{-m})
calculateNCRP(nodeWeights, rootNode, 0.0);
// Add weights for p(w_m | c, w_{-m}, z)
// The path may have no further customers and therefore
// be unavailable, but it should still exist since we haven't
// reset documentLeaves[doc] yet...
TIntIntHashMap[] typeCounts = new TIntIntHashMap[numLevels];
int[] docLevels;
for (level = 0; level < numLevels; level++) {
typeCounts[level] = new TIntIntHashMap();
}
docLevels = levels[doc];
FeatureSequence fs = (FeatureSequence) instances.get(doc).getData();
// Save the counts of every word at each level, and remove
// counts from the current path
for (token = 0; token < docLevels.length; token++) {
level = docLevels[token];
type = fs.getIndexAtPosition(token);
if (! typeCounts[level].containsKey(type)) {
typeCounts[level].put(type, 1);
}
else {
typeCounts[level].increment(type);
}
path[level].typeCounts[type]--;
assert(path[level].typeCounts[type] >= 0);
path[level].totalTokens--;
assert(path[level].totalTokens >= 0);
}
// Calculate the weight for a new path at a given level.
double[] newTopicWeights = new double[numLevels];
for (level = 1; level < numLevels; level++) { // Skip the root...
int[] types = typeCounts[level].keys();
int totalTokens = 0;
for (int t: types) {
for (int i=0; i<typeCounts[level].get(t); i++) {
newTopicWeights[level] +=
Math.log((eta + i) / (etaSum + totalTokens));
totalTokens++;
}
}
//if (iteration > 1) { System.out.println(newTopicWeights[level]); }
}
calculateWordLikelihood(nodeWeights, rootNode, 0.0, typeCounts, newTopicWeights, 0, iteration);
NCRPNode[] nodes = nodeWeights.keys(new NCRPNode[] {});
double[] weights = new double[nodes.length];
double sum = 0.0;
double max = Double.NEGATIVE_INFINITY;
// To avoid underflow, we're using log weights and normalizing the node weights so that
// the largest weight is always 1.
for (int i=0; i<nodes.length; i++) {
if (nodeWeights.get(nodes[i]) > max) {
max = nodeWeights.get(nodes[i]);
}
}
for (int i=0; i<nodes.length; i++) {
weights[i] = Math.exp(nodeWeights.get(nodes[i]) - max);
/*
if (iteration > 1) {
if (nodes[i] == documentLeaves[doc]) {
System.out.print("* ");
}
System.out.println(((NCRPNode) nodes[i]).level + "\t" + weights[i] +
"\t" + nodeWeights.get(nodes[i]));
}
*/
sum += weights[i];
}
//if (iteration > 1) {System.out.println();}
node = nodes[ random.nextDiscrete(weights, sum) ];
// If we have picked an internal node, we need to
// add a new path.
if (! node.isLeaf()) {
node = node.getNewLeaf();
}
node.addPath();
documentLeaves[doc] = node;
for (level = numLevels - 1; level >= 0; level--) {
int[] types = typeCounts[level].keys();
for (int t: types) {
node.typeCounts[t] += typeCounts[level].get(t);
node.totalTokens += typeCounts[level].get(t);
}
node = node.parent;
}
}
public void calculateNCRP(TObjectDoubleHashMap<NCRPNode> nodeWeights,
NCRPNode node, double weight) {
for (NCRPNode child: node.children) {
calculateNCRP(nodeWeights, child,
weight + Math.log((double) child.customers / (node.customers + gamma)));
}
nodeWeights.put(node, weight + Math.log(gamma / (node.customers + gamma)));
}
public void calculateWordLikelihood(TObjectDoubleHashMap<NCRPNode> nodeWeights,
NCRPNode node, double weight,
TIntIntHashMap[] typeCounts, double[] newTopicWeights,
int level, int iteration) {
// First calculate the likelihood of the words at this level, given
// this topic.
double nodeWeight = 0.0;
int[] types = typeCounts[level].keys();
int totalTokens = 0;
//if (iteration > 1) { System.out.println(level + " " + nodeWeight); }
for (int type: types) {
for (int i=0; i<typeCounts[level].get(type); i++) {
nodeWeight +=
Math.log((eta + node.typeCounts[type] + i) /
(etaSum + node.totalTokens + totalTokens));
totalTokens++;
/*
if (iteration > 1) {
System.out.println("(" +eta + " + " + node.typeCounts[type] + " + " + i + ") /" +
"(" + etaSum + " + " + node.totalTokens + " + " + totalTokens + ")" +
" : " + nodeWeight);
}
*/
}
}
//if (iteration > 1) { System.out.println(level + " " + nodeWeight); }
// Propagate that weight to the child nodes
for (NCRPNode child: node.children) {
calculateWordLikelihood(nodeWeights, child, weight + nodeWeight,
typeCounts, newTopicWeights, level + 1, iteration);
}
// Finally, if this is an internal node, add the weight of
// a new path
level++;
while (level < numLevels) {
nodeWeight += newTopicWeights[level];
level++;
}
nodeWeights.adjustValue(node, nodeWeight);
}
/** Propagate a topic weight to a node and all its children.
weight is assumed to be a log.
*/
public void propagateTopicWeight(TObjectDoubleHashMap<NCRPNode> nodeWeights,
NCRPNode node, double weight) {
if (! nodeWeights.containsKey(node)) {
// calculating the NCRP prior proceeds from the
// root down (ie following child links),
// but adding the word-topic weights comes from
// the bottom up, following parent links and then
// child links. It's possible that the leaf node may have
// been removed just prior to this round, so the current
// node may not have an NCRP weight. If so, it's not
// going to be sampled anyway, so ditch it.
return;
}
for (NCRPNode child: node.children) {
propagateTopicWeight(nodeWeights, child, weight);
}
nodeWeights.adjustValue(node, weight);
}
public void sampleTopics(int doc) {
FeatureSequence fs = (FeatureSequence) instances.get(doc).getData();
int seqLen = fs.getLength();
int[] docLevels = levels[doc];
NCRPNode[] path = new NCRPNode[numLevels];
NCRPNode node;
int[] levelCounts = new int[numLevels];
int type, token, level;
double sum;
// Get the leaf
node = documentLeaves[doc];
for (level = numLevels - 1; level >= 0; level--) {
path[level] = node;
node = node.parent;
}
double[] levelWeights = new double[numLevels];
// Initialize level counts
for (token = 0; token < seqLen; token++) {
levelCounts[ docLevels[token] ]++;
}
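// Each token's level is resampled in proportion to
// (alpha + levelCounts[level]) * (eta + path[level].typeCounts[type]) / (etaSum + path[level].totalTokens),
// i.e. how much the document already uses the level times the level's smoothed
// probability of generating this word type.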
for (token = 0; token < seqLen; token++) {
type = fs.getIndexAtPosition(token);
levelCounts[ docLevels[token] ]--;
node = path[ docLevels[token] ];
node.typeCounts[type]--;
node.totalTokens--;
sum = 0.0;
for (level=0; level < numLevels; level++) {
levelWeights[level] =
(alpha + levelCounts[level]) *
(eta + path[level].typeCounts[type]) /
(etaSum + path[level].totalTokens);
sum += levelWeights[level];
}
level = random.nextDiscrete(levelWeights, sum);
docLevels[token] = level;
levelCounts[ docLevels[token] ]++;
node = path[ level ];
node.typeCounts[type]++;
node.totalTokens++;
}
}
/**
* Writes the current sampling state to the file specified in <code>stateFile</code>.
*/
public void printState() throws IOException, FileNotFoundException {
printState(new PrintWriter(new BufferedWriter(new FileWriter(stateFile))));
}
/**
* Write a text file describing the current sampling state.
*/
public void printState(PrintWriter out) throws IOException {
int doc = 0;
Alphabet alphabet = instances.getDataAlphabet();
for (Instance instance: instances) {
FeatureSequence fs = (FeatureSequence) instance.getData();
int seqLen = fs.getLength();
int[] docLevels = levels[doc];
NCRPNode node;
int type, token, level;
StringBuffer path = new StringBuffer();
// Start with the leaf, and build a string describing the path for this doc
node = documentLeaves[doc];
for (level = numLevels - 1; level >= 0; level--) {
path.append(node.nodeID + " ");
node = node.parent;
}
for (token = 0; token < seqLen; token++) {
type = fs.getIndexAtPosition(token);
level = docLevels[token];
// The "" just tells java we're not trying to add a string and an int
out.println(path + "" + type + " " + alphabet.lookupObject(type) + " " + level + " ");
}
doc++;
}
}
public void printNodes() {
printNode(rootNode, 0);
}
public void printNode(NCRPNode node, int indent) {
StringBuffer out = new StringBuffer();
for (int i=0; i<indent; i++) {
out.append(" ");
}
out.append(node.totalTokens + "/" + node.customers + " ");
out.append(node.getTopWords(numWordsToDisplay));
System.out.println(out);
for (NCRPNode child: node.children) {
printNode(child, indent + 1);
}
}
/** For use with empirical likelihood evaluation:
* sample a path through the tree, then sample a multinomial over
* topics in that path, then return a weighted sum of words.
*/
public double empiricalLikelihood(int numSamples, InstanceList testing) {
NCRPNode[] path = new NCRPNode[numLevels];
NCRPNode node;
double weight;
path[0] = rootNode;
FeatureSequence fs;
int sample, level, type, token, doc, seqLen;
Dirichlet dirichlet = new Dirichlet(numLevels, alpha);
double[] levelWeights;
double[] multinomial = new double[numTypes];
double[][] likelihoods = new double[ testing.size() ][ numSamples ];
for (sample = 0; sample < numSamples; sample++) {
Arrays.fill(multinomial, 0.0);
for (level = 1; level < numLevels; level++) {
path[level] = path[level-1].selectExisting();
}
levelWeights = dirichlet.nextDistribution();
for (type = 0; type < numTypes; type++) {
for (level = 0; level < numLevels; level++) {
node = path[level];
multinomial[type] +=
levelWeights[level] *
(eta + node.typeCounts[type]) /
(etaSum + node.totalTokens);
}
}
for (type = 0; type < numTypes; type++) {
multinomial[type] = Math.log(multinomial[type]);
}
for (doc=0; doc<testing.size(); doc++) {
fs = (FeatureSequence) testing.get(doc).getData();
seqLen = fs.getLength();
for (token = 0; token < seqLen; token++) {
type = fs.getIndexAtPosition(token);
likelihoods[doc][sample] += multinomial[type];
}
}
}
double averageLogLikelihood = 0.0;
double logNumSamples = Math.log(numSamples);
for (doc=0; doc<testing.size(); doc++) {
double max = Double.NEGATIVE_INFINITY;
for (sample = 0; sample < numSamples; sample++) {
if (likelihoods[doc][sample] > max) {
max = likelihoods[doc][sample];
}
}
double sum = 0.0;
for (sample = 0; sample < numSamples; sample++) {
sum += Math.exp(likelihoods[doc][sample] - max);
}
averageLogLikelihood += Math.log(sum) + max - logNumSamples;
}
return averageLogLikelihood;
}
/**
* This method is primarily for testing purposes. The {@link cc.mallet.topics.tui.HierarchicalLDATUI}
* class has a more flexible interface for command-line use.
*/
public static void main (String[] args) {
try {
InstanceList instances = InstanceList.load(new File(args[0]));
InstanceList testing = InstanceList.load(new File(args[1]));
HierarchicalLDA sampler = new HierarchicalLDA();
sampler.initialize(instances, testing, 5, new Randoms());
sampler.estimate(250);
} catch (Exception e) {
e.printStackTrace();
}
}
class NCRPNode {
int customers;
ArrayList<NCRPNode> children;
NCRPNode parent;
int level;
int totalTokens;
int[] typeCounts;
public int nodeID;
public NCRPNode(NCRPNode parent, int dimensions, int level) {
customers = 0;
this.parent = parent;
children = new ArrayList<NCRPNode>();
this.level = level;
//System.out.println("new node at level " + level);
totalTokens = 0;
typeCounts = new int[dimensions];
nodeID = totalNodes;
totalNodes++;
}
public NCRPNode(int dimensions) {
this(null, dimensions, 0);
}
public NCRPNode addChild() {
NCRPNode node = new NCRPNode(this, typeCounts.length, level + 1);
children.add(node);
return node;
}
public boolean isLeaf() {
return level == numLevels - 1;
}
public NCRPNode getNewLeaf() {
NCRPNode node = this;
for (int l=level; l<numLevels - 1; l++) {
node = node.addChild();
}
return node;
}
public void dropPath() {
NCRPNode node = this;
node.customers--;
if (node.customers == 0) {
node.parent.remove(node);
}
for (int l = 1; l < numLevels; l++) {
node = node.parent;
node.customers--;
if (node.customers == 0) {
node.parent.remove(node);
}
}
}
public void remove(NCRPNode node) {
children.remove(node);
}
public void addPath() {
NCRPNode node = this;
node.customers++;
for (int l = 1; l < numLevels; l++) {
node = node.parent;
node.customers++;
}
}
public NCRPNode selectExisting() {
double[] weights = new double[children.size()];
int i = 0;
for (NCRPNode child: children) {
weights[i] = (double) child.customers / (gamma + customers);
i++;
}
int choice = random.nextDiscrete(weights);
return children.get(choice);
}
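// Chinese restaurant process choice. Illustration (hypothetical numbers): with
// gamma = 1.0 and customers = 9, a node whose two children seat 6 and 3 customers
// selects them with probabilities 6/10 and 3/10, and creates a new child with
// probability 1/10.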
public NCRPNode select() {
double[] weights = new double[children.size() + 1];
weights[0] = gamma / (gamma + customers);
int i = 1;
for (NCRPNode child: children) {
weights[i] = (double) child.customers / (gamma + customers);
i++;
}
int choice = random.nextDiscrete(weights);
if (choice == 0) {
return(addChild());
}
else {
return children.get(choice - 1);
}
}
public String getTopWords(int numWords) {
IDSorter[] sortedTypes = new IDSorter[numTypes];
for (int type=0; type < numTypes; type++) {
sortedTypes[type] = new IDSorter(type, typeCounts[type]);
}
Arrays.sort(sortedTypes);
Alphabet alphabet = instances.getDataAlphabet();
StringBuffer out = new StringBuffer();
for (int i=0; i<numWords; i++) {
out.append(alphabet.lookupObject(sortedTypes[i].getID()) + " ");
}
return out.toString();
}
}
}
| 18,535 | 25.366999 | 142 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/LDAStream.java | /**
* Implement different Gibbs sampling based inference methods
*/
package cc.mallet.topics;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.zip.GZIPOutputStream;
import cc.mallet.types.FeatureCounter;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.IDSorter;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.LabelSequence;
import cc.mallet.util.Randoms;
import gnu.trove.TIntIntHashMap;
/**
* Gibbs-sampling-based topic inference for held-out documents,
* built on a model trained with {@link LDAHyper}.
*
* @author Limin Yao, David Mimno
*/
public class LDAStream extends LDAHyper {
protected ArrayList<Topication> test; // the test instances and their topic assignments
/**
* @param numberOfTopics
*/
public LDAStream(int numberOfTopics) {
super(numberOfTopics);
// TODO Auto-generated constructor stub
}
/**
* @param numberOfTopics
* @param alphaSum
* @param beta
*/
public LDAStream(int numberOfTopics, double alphaSum, double beta) {
super(numberOfTopics, alphaSum, beta);
// TODO Auto-generated constructor stub
}
/**
* @param numberOfTopics
* @param alphaSum
* @param beta
* @param random
*/
public LDAStream(int numberOfTopics, double alphaSum, double beta,
Randoms random) {
super(numberOfTopics, alphaSum, beta, random);
// TODO Auto-generated constructor stub
}
/**
* @param topicAlphabet
* @param alphaSum
* @param beta
* @param random
*/
public LDAStream(LabelAlphabet topicAlphabet, double alphaSum, double beta,
Randoms random) {
super(topicAlphabet, alphaSum, beta, random);
// TODO Auto-generated constructor stub
}
public ArrayList<Topication> getTest() { return test; }
//First train a topic model on the training data,
//then run inference on the test data (accumulating typeTopicCounts),
//and finally re-sample on all of the data.
public void inferenceAll(int maxIteration){
this.test = new ArrayList<Topication>(); //initialize test
//initial sampling on testdata
ArrayList<LabelSequence> topicSequences = new ArrayList<LabelSequence>();
for (Instance instance : testing) {
LabelSequence topicSequence = new LabelSequence(topicAlphabet, new int[instanceLength(instance)]);
if (false) {
// This method does not yet obey its last "false" argument, but it must for this code path to work
//sampleTopicsForOneDoc((FeatureSequence)instance.getData(), topicSequence, false, false);
} else {
Randoms r = new Randoms();
FeatureSequence fs = (FeatureSequence) instance.getData();
int[] topics = topicSequence.getFeatures();
for (int i = 0; i < topics.length; i++) {
int type = fs.getIndexAtPosition(i);
topics[i] = r.nextInt(numTopics);
typeTopicCounts[type].adjustOrPutValue(topics[i], 1, 1);
tokensPerTopic[topics[i]]++;
}
}
topicSequences.add (topicSequence);
}
//construct test
assert (testing.size() == topicSequences.size());
for (int i = 0; i < testing.size(); i++) {
Topication t = new Topication (testing.get(i), this, topicSequences.get(i));
test.add (t);
}
long startTime = System.currentTimeMillis();
//loop
int iter = 0;
for ( ; iter <= maxIteration; iter++) {
if(iter%100==0)
{
System.out.print("Iteration: " + iter);
System.out.println();
}
int numDocs = test.size(); // TODO
for (int di = 0; di < numDocs; di++) {
FeatureSequence tokenSequence = (FeatureSequence) test.get(di).instance.getData();
LabelSequence topicSequence = test.get(di).topicSequence;
sampleTopicsForOneTestDocAll (tokenSequence, topicSequence);
}
}
long seconds = Math.round((System.currentTimeMillis() - startTime)/1000.0);
long minutes = seconds / 60; seconds %= 60;
long hours = minutes / 60; minutes %= 60;
long days = hours / 24; hours %= 24;
System.out.print ("\nTotal inferencing time: ");
if (days != 0) { System.out.print(days); System.out.print(" days "); }
if (hours != 0) { System.out.print(hours); System.out.print(" hours "); }
if (minutes != 0) { System.out.print(minutes); System.out.print(" minutes "); }
System.out.print(seconds); System.out.println(" seconds");
}
//called by inferenceAll, using unseen words in testdata
private void sampleTopicsForOneTestDocAll(FeatureSequence tokenSequence,
LabelSequence topicSequence) {
// TODO Auto-generated method stub
int[] oneDocTopics = topicSequence.getFeatures();
TIntIntHashMap currentTypeTopicCounts;
int type, oldTopic, newTopic;
double tw;
double[] topicWeights = new double[numTopics];
double topicWeightsSum;
int docLength = tokenSequence.getLength();
// populate topic counts
int[] localTopicCounts = new int[numTopics];
for (int ti = 0; ti < numTopics; ti++){
localTopicCounts[ti] = 0;
}
for (int position = 0; position < docLength; position++) {
localTopicCounts[oneDocTopics[position]] ++;
}
// Iterate over the positions (words) in the document
for (int si = 0; si < docLength; si++) {
type = tokenSequence.getIndexAtPosition(si);
oldTopic = oneDocTopics[si];
// Remove this token from all counts
localTopicCounts[oldTopic] --;
currentTypeTopicCounts = typeTopicCounts[type];
assert(currentTypeTopicCounts.get(oldTopic) >= 0);
if (currentTypeTopicCounts.get(oldTopic) == 1) {
currentTypeTopicCounts.remove(oldTopic);
}
else {
currentTypeTopicCounts.adjustValue(oldTopic, -1);
}
tokensPerTopic[oldTopic]--;
// Build a distribution over topics for this token
Arrays.fill (topicWeights, 0.0);
topicWeightsSum = 0;
for (int ti = 0; ti < numTopics; ti++) {
tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
* ((localTopicCounts[ti] + alpha[ti])); // the denominator (docLength - 1 + sum of alpha) is constant across topics, so it is omitted
topicWeightsSum += tw;
topicWeights[ti] = tw;
}
// Sample a topic assignment from this distribution
newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);
// Put that new topic into the counts
oneDocTopics[si] = newTopic;
currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
localTopicCounts[newTopic] ++;
tokensPerTopic[newTopic]++;
}
}
	// At this point we have: typeTopicCounts, tokensPerTopic, and the topic sequences
	// of both the training and test data.
public void estimateAll(int iteration) throws IOException {
		// Re-run Gibbs sampling over the combined training and test data.
data.addAll(test);
initializeHistogramsAndCachedValues();
estimate(iteration);
}
	// Inference on test data; one problem is how to handle unseen words.
	// Unseen words are in the Alphabet, but their typeTopicCounts entries are empty.
	// Added by Limin Yao.
	/**
	 * Run Gibbs sampling over the test documents, adding their topic assignments
	 * to typeTopicCounts and tokensPerTopic.
	 * @param maxIteration the number of sampling iterations to run over the test set
	 */
public void inference(int maxIteration){
this.test = new ArrayList<Topication>(); //initialize test
//initial sampling on testdata
ArrayList<LabelSequence> topicSequences = new ArrayList<LabelSequence>();
for (Instance instance : testing) {
LabelSequence topicSequence = new LabelSequence(topicAlphabet, new int[instanceLength(instance)]);
if (false) {
				// The commented-out call below does not yet honor its final "false" argument, which it must before this branch can be enabled.
//sampleTopicsForOneDoc((FeatureSequence)instance.getData(), topicSequence, false, false);
} else {
Randoms r = new Randoms();
FeatureSequence fs = (FeatureSequence) instance.getData();
int[] topics = topicSequence.getFeatures();
for (int i = 0; i < topics.length; i++) {
int type = fs.getIndexAtPosition(i);
topics[i] = r.nextInt(numTopics);
/* if(typeTopicCounts[type].size() != 0) {
topics[i] = r.nextInt(numTopics);
} else {
topics[i] = -1; // for unseen words
}*/
}
}
topicSequences.add (topicSequence);
}
//construct test
assert (testing.size() == topicSequences.size());
for (int i = 0; i < testing.size(); i++) {
Topication t = new Topication (testing.get(i), this, topicSequences.get(i));
test.add (t);
// Include sufficient statistics for this one doc
// add count on new data to n[k][w] and n[k][*]
// pay attention to unseen words
FeatureSequence tokenSequence = (FeatureSequence) t.instance.getData();
LabelSequence topicSequence = t.topicSequence;
for (int pi = 0; pi < topicSequence.getLength(); pi++) {
int topic = topicSequence.getIndexAtPosition(pi);
int type = tokenSequence.getIndexAtPosition(pi);
if(topic != -1) // type seen in training
{
typeTopicCounts[type].adjustOrPutValue(topic, 1, 1);
tokensPerTopic[topic]++;
}
}
}
long startTime = System.currentTimeMillis();
//loop
int iter = 0;
for ( ; iter <= maxIteration; iter++) {
			if (iter % 100 == 0) {
				System.out.println("Iteration: " + iter);
			}
int numDocs = test.size(); // TODO
for (int di = 0; di < numDocs; di++) {
FeatureSequence tokenSequence = (FeatureSequence) test.get(di).instance.getData();
LabelSequence topicSequence = test.get(di).topicSequence;
sampleTopicsForOneTestDoc (tokenSequence, topicSequence);
}
}
long seconds = Math.round((System.currentTimeMillis() - startTime)/1000.0);
long minutes = seconds / 60; seconds %= 60;
long hours = minutes / 60; minutes %= 60;
long days = hours / 24; hours %= 24;
System.out.print ("\nTotal inferencing time: ");
if (days != 0) { System.out.print(days); System.out.print(" days "); }
if (hours != 0) { System.out.print(hours); System.out.print(" hours "); }
if (minutes != 0) { System.out.print(minutes); System.out.print(" minutes "); }
System.out.print(seconds); System.out.println(" seconds");
}
private void sampleTopicsForOneTestDoc(FeatureSequence tokenSequence,
LabelSequence topicSequence) {
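		// Like sampleTopicsForOneTestDocAll, but skips positions whose topic is -1
		// (words not seen in training), leaving their assignments untouched.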
int[] oneDocTopics = topicSequence.getFeatures();
TIntIntHashMap currentTypeTopicCounts;
int type, oldTopic, newTopic;
double tw;
double[] topicWeights = new double[numTopics];
double topicWeightsSum;
int docLength = tokenSequence.getLength();
// populate topic counts
int[] localTopicCounts = new int[numTopics];
for (int ti = 0; ti < numTopics; ti++){
localTopicCounts[ti] = 0;
}
for (int position = 0; position < docLength; position++) {
if(oneDocTopics[position] != -1) {
localTopicCounts[oneDocTopics[position]] ++;
}
}
// Iterate over the positions (words) in the document
for (int si = 0; si < docLength; si++) {
type = tokenSequence.getIndexAtPosition(si);
oldTopic = oneDocTopics[si];
if(oldTopic == -1) {
continue;
}
// Remove this token from all counts
localTopicCounts[oldTopic] --;
currentTypeTopicCounts = typeTopicCounts[type];
assert(currentTypeTopicCounts.get(oldTopic) >= 0);
if (currentTypeTopicCounts.get(oldTopic) == 1) {
currentTypeTopicCounts.remove(oldTopic);
}
else {
currentTypeTopicCounts.adjustValue(oldTopic, -1);
}
tokensPerTopic[oldTopic]--;
// Build a distribution over topics for this token
Arrays.fill (topicWeights, 0.0);
topicWeightsSum = 0;
for (int ti = 0; ti < numTopics; ti++) {
tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
* ((localTopicCounts[ti] + alpha[ti])); // (/docLen-1+tAlpha); is constant across all topics
topicWeightsSum += tw;
topicWeights[ti] = tw;
}
// Sample a topic assignment from this distribution
newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);
// Put that new topic into the counts
oneDocTopics[si] = newTopic;
currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
localTopicCounts[newTopic] ++;
tokensPerTopic[newTopic]++;
}
}
	// Inference method 3: for each doc, for each iteration, for each word.
	// Compare against inference() (method 2), which loops: for each iteration, for each doc, for each word.
public void inferenceOneByOne(int maxIteration){
this.test = new ArrayList<Topication>(); //initialize test
//initial sampling on testdata
ArrayList<LabelSequence> topicSequences = new ArrayList<LabelSequence>();
for (Instance instance : testing) {
LabelSequence topicSequence = new LabelSequence(topicAlphabet, new int[instanceLength(instance)]);
if (false) {
				// The commented-out call below does not yet honor its final "false" argument, which it must before this branch can be enabled.
//sampleTopicsForOneDoc((FeatureSequence)instance.getData(), topicSequence, false, false);
} else {
Randoms r = new Randoms();
FeatureSequence fs = (FeatureSequence) instance.getData();
int[] topics = topicSequence.getFeatures();
for (int i = 0; i < topics.length; i++) {
int type = fs.getIndexAtPosition(i);
topics[i] = r.nextInt(numTopics);
typeTopicCounts[type].adjustOrPutValue(topics[i], 1, 1);
tokensPerTopic[topics[i]]++;
/* if(typeTopicCounts[type].size() != 0) {
topics[i] = r.nextInt(numTopics);
typeTopicCounts[type].adjustOrPutValue(topics[i], 1, 1);
tokensPerTopic[topics[i]]++;
} else {
topics[i] = -1; // for unseen words
}*/
}
}
topicSequences.add (topicSequence);
}
//construct test
assert (testing.size() == topicSequences.size());
for (int i = 0; i < testing.size(); i++) {
Topication t = new Topication (testing.get(i), this, topicSequences.get(i));
test.add (t);
}
long startTime = System.currentTimeMillis();
//loop
int iter = 0;
int numDocs = test.size(); // TODO
for (int di = 0; di < numDocs; di++) {
iter = 0;
FeatureSequence tokenSequence = (FeatureSequence) test.get(di).instance.getData();
LabelSequence topicSequence = test.get(di).topicSequence;
for( ; iter <= maxIteration; iter++) {
sampleTopicsForOneTestDoc (tokenSequence, topicSequence);
}
			if (di % 100 == 0) {
				System.out.println("Docnum: " + di);
			}
}
long seconds = Math.round((System.currentTimeMillis() - startTime)/1000.0);
long minutes = seconds / 60; seconds %= 60;
long hours = minutes / 60; minutes %= 60;
long days = hours / 24; hours %= 24;
System.out.print ("\nTotal inferencing time: ");
if (days != 0) { System.out.print(days); System.out.print(" days "); }
if (hours != 0) { System.out.print(hours); System.out.print(" hours "); }
if (minutes != 0) { System.out.print(minutes); System.out.print(" minutes "); }
System.out.print(seconds); System.out.println(" seconds");
}
public void inferenceWithTheta(int maxIteration, InstanceList theta){
this.test = new ArrayList<Topication>(); //initialize test
//initial sampling on testdata
ArrayList<LabelSequence> topicSequences = new ArrayList<LabelSequence>();
for (Instance instance : testing) {
LabelSequence topicSequence = new LabelSequence(topicAlphabet, new int[instanceLength(instance)]);
if (false) {
				// The commented-out call below does not yet honor its final "false" argument, which it must before this branch can be enabled.
//sampleTopicsForOneDoc((FeatureSequence)instance.getData(), topicSequence, false, false);
} else {
Randoms r = new Randoms();
FeatureSequence fs = (FeatureSequence) instance.getData();
int[] topics = topicSequence.getFeatures();
for (int i = 0; i < topics.length; i++) {
int type = fs.getIndexAtPosition(i);
topics[i] = r.nextInt(numTopics);
}
}
topicSequences.add (topicSequence);
}
//construct test
assert (testing.size() == topicSequences.size());
for (int i = 0; i < testing.size(); i++) {
Topication t = new Topication (testing.get(i), this, topicSequences.get(i));
test.add (t);
// Include sufficient statistics for this one doc
// add count on new data to n[k][w] and n[k][*]
// pay attention to unseen words
FeatureSequence tokenSequence = (FeatureSequence) t.instance.getData();
LabelSequence topicSequence = t.topicSequence;
for (int pi = 0; pi < topicSequence.getLength(); pi++) {
int topic = topicSequence.getIndexAtPosition(pi);
int type = tokenSequence.getIndexAtPosition(pi);
if(topic != -1) // type seen in training
{
typeTopicCounts[type].adjustOrPutValue(topic, 1, 1);
tokensPerTopic[topic]++;
}
}
}
long startTime = System.currentTimeMillis();
//loop
int iter = 0;
for ( ; iter <= maxIteration; iter++) {
			if (iter % 100 == 0) {
				System.out.println("Iteration: " + iter);
			}
int numDocs = test.size(); // TODO
for (int di = 0; di < numDocs; di++) {
FeatureVector fvTheta = (FeatureVector) theta.get(di).getData();
double[] topicDistribution = fvTheta.getValues();
FeatureSequence tokenSequence = (FeatureSequence) test.get(di).instance.getData();
LabelSequence topicSequence = test.get(di).topicSequence;
sampleTopicsForOneDocWithTheta (tokenSequence, topicSequence, topicDistribution);
}
}
long seconds = Math.round((System.currentTimeMillis() - startTime)/1000.0);
long minutes = seconds / 60; seconds %= 60;
long hours = minutes / 60; minutes %= 60;
long days = hours / 24; hours %= 24;
System.out.print ("\nTotal inferencing time: ");
if (days != 0) { System.out.print(days); System.out.print(" days "); }
if (hours != 0) { System.out.print(hours); System.out.print(" hours "); }
if (minutes != 0) { System.out.print(minutes); System.out.print(" minutes "); }
System.out.print(seconds); System.out.println(" seconds");
}
	// Sampling with a known theta (per-document topic distribution), e.g. one produced by a MaxEnt
	// classifier; theta replaces the (n(t|d) + alpha) term in the sampling weights.
private void sampleTopicsForOneDocWithTheta(FeatureSequence tokenSequence,
LabelSequence topicSequence, double[] topicDistribution) {
int[] oneDocTopics = topicSequence.getFeatures();
TIntIntHashMap currentTypeTopicCounts;
int type, oldTopic, newTopic;
double tw;
double[] topicWeights = new double[numTopics];
double topicWeightsSum;
int docLength = tokenSequence.getLength();
// Iterate over the positions (words) in the document
for (int si = 0; si < docLength; si++) {
type = tokenSequence.getIndexAtPosition(si);
oldTopic = oneDocTopics[si];
if(oldTopic == -1) {
continue;
}
currentTypeTopicCounts = typeTopicCounts[type];
assert(currentTypeTopicCounts.get(oldTopic) >= 0);
if (currentTypeTopicCounts.get(oldTopic) == 1) {
currentTypeTopicCounts.remove(oldTopic);
}
else {
currentTypeTopicCounts.adjustValue(oldTopic, -1);
}
tokensPerTopic[oldTopic]--;
// Build a distribution over topics for this token
Arrays.fill (topicWeights, 0.0);
topicWeightsSum = 0;
for (int ti = 0; ti < numTopics; ti++) {
tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
* topicDistribution[ti]; // (/docLen-1+tAlpha); is constant across all topics
topicWeightsSum += tw;
topicWeights[ti] = tw;
}
// Sample a topic assignment from this distribution
newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);
// Put that new topic into the counts
oneDocTopics[si] = newTopic;
currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
tokensPerTopic[newTopic]++;
}
}
	// Print a human-readable doc-topic matrix, for further IR use.
public void printTheta(ArrayList<Topication> dataset, File f, double threshold, int max) throws IOException{
PrintWriter pw = new PrintWriter(new FileWriter(f));
int[] topicCounts = new int[ numTopics ];
int docLen;
for (int di = 0; di < dataset.size(); di++) {
LabelSequence topicSequence = dataset.get(di).topicSequence;
int[] currentDocTopics = topicSequence.getFeatures();
docLen = currentDocTopics.length;
for (int token=0; token < docLen; token++) {
topicCounts[ currentDocTopics[token] ]++;
}
pw.println(dataset.get(di).instance.getName());
			// (n(t|d) + alpha(t)) / (docLen + alphaSum)
for (int topic = 0; topic < numTopics; topic++) {
double prob = (double) (topicCounts[topic]+alpha[topic]) / (docLen + alphaSum);
pw.println("topic"+ topic + "\t" + prob);
}
pw.println();
Arrays.fill(topicCounts, 0);
}
pw.close();
}
//print topic-word matrix, for further IR use
public void printPhi(File f, double threshold) throws IOException{
PrintWriter pw = new PrintWriter(new FileWriter(f));
FeatureCounter[] wordCountsPerTopic = new FeatureCounter[numTopics];
for (int ti = 0; ti < numTopics; ti++) {
wordCountsPerTopic[ti] = new FeatureCounter(alphabet);
}
for (int fi = 0; fi < numTypes; fi++) {
int[] topics = typeTopicCounts[fi].keys();
for (int i = 0; i < topics.length; i++) {
wordCountsPerTopic[topics[i]].increment(fi, typeTopicCounts[fi].get(topics[i]));
}
}
for(int ti = 0; ti < numTopics; ti++){
pw.println("Topic\t" + ti);
FeatureCounter counter = wordCountsPerTopic[ti];
FeatureVector fv = counter.toFeatureVector();
for(int pos = 0; pos < fv.numLocations(); pos++){
int fi = fv.indexAtLocation(pos);
String word = (String) alphabet.lookupObject(fi);
int count = (int) fv.valueAtLocation(pos);
double prob;
prob = (double) (count+beta)/(tokensPerTopic[ti] + betaSum);
pw.println(word + "\t" + prob);
}
pw.println();
}
pw.close();
}
public void printDocumentTopics (ArrayList<Topication> dataset, File f) throws IOException {
printDocumentTopics (dataset, new PrintWriter (new FileWriter (f) ) );
}
public void printDocumentTopics (ArrayList<Topication> dataset, PrintWriter pw) {
printDocumentTopics (dataset, pw, 0.0, -1);
}
	/**
	 * @param dataset The topic-assigned documents whose topic proportions should be printed
	 * @param pw A print writer
	 * @param threshold Only print topics with proportion greater than this number
	 * @param max Print no more than this many topics
	 */
public void printDocumentTopics (ArrayList<Topication> dataset, PrintWriter pw, double threshold, int max) {
pw.print ("#doc source topic proportion ...\n");
int docLen;
int[] topicCounts = new int[ numTopics ];
IDSorter[] sortedTopics = new IDSorter[ numTopics ];
for (int topic = 0; topic < numTopics; topic++) {
// Initialize the sorters with dummy values
sortedTopics[topic] = new IDSorter(topic, topic);
}
if (max < 0 || max > numTopics) {
max = numTopics;
}
for (int di = 0; di < dataset.size(); di++) {
LabelSequence topicSequence = dataset.get(di).topicSequence;
int[] currentDocTopics = topicSequence.getFeatures();
pw.print (di); pw.print (' ');
if (dataset.get(di).instance.getSource() != null) {
pw.print (dataset.get(di).instance.getSource());
}
else {
pw.print ("null-source");
}
pw.print (' ');
docLen = currentDocTopics.length;
// Count up the tokens
int realDocLen = 0;
for (int token=0; token < docLen; token++) {
if(currentDocTopics[token] != -1) {
topicCounts[ currentDocTopics[token] ]++;
realDocLen ++;
}
}
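			// Note: with the current initialization topics are never -1, so every token is counted
			// and realDocLen should equal docLen.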
assert(realDocLen == docLen);
alphaSum=0.0;
for(int topic=0; topic < numTopics; topic++){
alphaSum+=alpha[topic];
}
// And normalize and smooth by Dirichlet prior alpha
for (int topic = 0; topic < numTopics; topic++) {
sortedTopics[topic].set(topic, (double) (topicCounts[topic]+alpha[topic]) / (docLen + alphaSum));
}
Arrays.sort(sortedTopics);
for (int i = 0; i < max; i++) {
if (sortedTopics[i].getWeight() < threshold) { break; }
pw.print (sortedTopics[i].getID() + " " +
sortedTopics[i].getWeight() + " ");
}
pw.print (" \n");
Arrays.fill(topicCounts, 0);
}
pw.close();
}
public void printState (ArrayList<Topication> dataset, File f) throws IOException {
PrintStream out =
new PrintStream(new GZIPOutputStream(new BufferedOutputStream(new FileOutputStream(f))));
printState(dataset, out);
out.close();
}
public void printState (ArrayList<Topication> dataset, PrintStream out) {
out.println ("#doc source pos typeindex type topic");
for (int di = 0; di < dataset.size(); di++) {
FeatureSequence tokenSequence = (FeatureSequence) dataset.get(di).instance.getData();
LabelSequence topicSequence = dataset.get(di).topicSequence;
String source = "NA";
if (dataset.get(di).instance.getSource() != null) {
source = dataset.get(di).instance.getSource().toString();
}
for (int pi = 0; pi < topicSequence.getLength(); pi++) {
int type = tokenSequence.getIndexAtPosition(pi);
int topic = topicSequence.getIndexAtPosition(pi);
out.print(di); out.print(' ');
out.print(source); out.print(' ');
out.print(pi); out.print(' ');
out.print(type); out.print(' ');
out.print(alphabet.lookupObject(type)); out.print(' ');
out.print(topic); out.println();
}
}
}
}
| 24,648 | 33.234722 | 109 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/tui/HierarchicalLDATUI.java | package cc.mallet.topics.tui;
import cc.mallet.util.CommandOption;
import cc.mallet.util.Randoms;
import cc.mallet.types.InstanceList;
import cc.mallet.topics.HierarchicalLDA;
import java.io.*;
public class HierarchicalLDATUI {
static CommandOption.String inputFile = new CommandOption.String
(HierarchicalLDATUI.class, "input", "FILENAME", true, null,
"The filename from which to read the list of training instances. Use - for stdin. " +
"The instances must be FeatureSequence or FeatureSequenceWithBigrams, not FeatureVector", null);
static CommandOption.String testingFile = new CommandOption.String
(HierarchicalLDATUI.class, "testing", "FILENAME", true, null,
"The filename from which to read the list of instances for held-out likelihood calculation. Use - for stdin. " +
"The instances must be FeatureSequence or FeatureSequenceWithBigrams, not FeatureVector", null);
static CommandOption.String stateFile = new CommandOption.String
(HierarchicalLDATUI.class, "output-state", "FILENAME", true, null,
"The filename in which to write the Gibbs sampling state after at the end of the iterations. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.Integer randomSeed = new CommandOption.Integer
(HierarchicalLDATUI.class, "random-seed", "INTEGER", true, 0,
"The random seed for the Gibbs sampler. Default is 0, which will use the clock.", null);
static CommandOption.Integer numIterations = new CommandOption.Integer
		(HierarchicalLDATUI.class, "num-iterations", "INTEGER", true, 1000,
"The number of iterations of Gibbs sampling.", null);
static CommandOption.Boolean showProgress = new CommandOption.Boolean
(HierarchicalLDATUI.class, "show-progress", "BOOLEAN", false, true,
"If true, print a character to standard output after every sampling iteration.", null);
static CommandOption.Integer showTopicsInterval = new CommandOption.Integer
(HierarchicalLDATUI.class, "show-topics-interval", "INTEGER", true, 50,
"The number of iterations between printing a brief summary of the topics so far.", null);
static CommandOption.Integer topWords = new CommandOption.Integer
(HierarchicalLDATUI.class, "num-top-words", "INTEGER", true, 20,
"The number of most probable words to print for each topic after model estimation.", null);
static CommandOption.Integer numLevels = new CommandOption.Integer
(HierarchicalLDATUI.class, "num-levels", "INTEGER", true, 3,
"The number of levels in the tree.", null);
static CommandOption.Double alpha = new CommandOption.Double
(HierarchicalLDATUI.class, "alpha", "DECIMAL", true, 10.0,
"Alpha parameter: smoothing over level distributions.", null);
static CommandOption.Double gamma = new CommandOption.Double
(HierarchicalLDATUI.class, "gamma", "DECIMAL", true, 1.0,
"Gamma parameter: CRP smoothing parameter; number of imaginary customers at next, as yet unused table", null);
static CommandOption.Double eta = new CommandOption.Double
(HierarchicalLDATUI.class, "eta", "DECIMAL", true, 0.1,
"Eta parameter: smoothing over topic-word distributions", null);
public static void main (String[] args) throws java.io.IOException {
// Process the command-line options
CommandOption.setSummary (HierarchicalLDATUI.class,
"Hierarchical LDA with a fixed tree depth.");
CommandOption.process (HierarchicalLDATUI.class, args);
// Load instance lists
if (inputFile.value() == null) {
System.err.println("Input instance list is required, use --input option");
System.exit(1);
}
InstanceList instances = InstanceList.load(new File(inputFile.value()));
InstanceList testing = null;
if (testingFile.value() != null) {
testing = InstanceList.load(new File(testingFile.value()));
}
HierarchicalLDA hlda = new HierarchicalLDA();
// Set hyperparameters
hlda.setAlpha(alpha.value());
hlda.setGamma(gamma.value());
hlda.setEta(eta.value());
// Display preferences
hlda.setTopicDisplay(showTopicsInterval.value(), topWords.value());
hlda.setProgressDisplay(showProgress.value());
// Initialize random number generator
Randoms random = null;
if (randomSeed.value() == 0) {
random = new Randoms();
}
else {
random = new Randoms(randomSeed.value());
}
// Initialize and start the sampler
hlda.initialize(instances, testing, numLevels.value(), random);
hlda.estimate(numIterations.value());
// Output results
if (stateFile.value() != null) {
hlda.printState(new PrintWriter(stateFile.value()));
}
if (testing != null) {
double empiricalLikelihood = hlda.empiricalLikelihood(1000, testing);
System.out.println("Empirical likelihood: " + empiricalLikelihood);
}
}
} | 4,758 | 37.691057 | 117 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/tui/EvaluateTopics.java | package cc.mallet.topics.tui;
import cc.mallet.util.*;
import cc.mallet.types.*;
import cc.mallet.topics.*;
import java.io.*;
public class EvaluateTopics {
static CommandOption.String evaluatorFilename = new CommandOption.String
(EvaluateTopics.class, "evaluator", "FILENAME", true, null,
"A serialized topic evaluator from a trained topic model.\n" +
"By default this is null, indicating that no file will be read.", null);
static CommandOption.String inputFile = new CommandOption.String
(EvaluateTopics.class, "input", "FILENAME", true, null,
"The filename from which to read the list of instances\n" +
"for which topics should be inferred. Use - for stdin. " +
"The instances must be FeatureSequence or FeatureSequenceWithBigrams, not FeatureVector", null);
static CommandOption.String docProbabilityFile = new CommandOption.String
(EvaluateTopics.class, "output-doc-probs", "FILENAME", true, null,
"The filename in which to write the inferred log probabilities\n" +
"per document. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String probabilityFile = new CommandOption.String
(EvaluateTopics.class, "output-prob", "FILENAME", true, "-",
"The filename in which to write the inferred log probability of the testing set\n" +
"Use - for stdout, which is the default.", null);
static CommandOption.Integer numParticles = new CommandOption.Integer
(EvaluateTopics.class, "num-particles", "INTEGER", true, 10,
"The number of particles to use in left-to-right evaluation.", null);
static CommandOption.Boolean usingResampling = new CommandOption.Boolean
(EvaluateTopics.class, "use-resampling", "TRUE|FALSE", false, false,
"Whether to resample topics in left-to-right evaluation. Resampling is more accurate, but leads to quadratic scaling in the lenght of documents.", null);
static CommandOption.Integer numIterations = new CommandOption.Integer
(EvaluateTopics.class, "num-iterations", "INTEGER", true, 100,
"The number of iterations of Gibbs sampling.", null);
static CommandOption.Integer sampleInterval = new CommandOption.Integer
(EvaluateTopics.class, "sample-interval", "INTEGER", true, 10,
"The number of iterations between saved samples.", null);
static CommandOption.Integer burnInIterations = new CommandOption.Integer
(EvaluateTopics.class, "burn-in", "INTEGER", true, 10,
"The number of iterations before the first sample is saved.", null);
static CommandOption.Integer randomSeed = new CommandOption.Integer
(EvaluateTopics.class, "random-seed", "INTEGER", true, 0,
"The random seed for the Gibbs sampler. Default is 0, which will use the clock.", null);
public static void main (String[] args) {
// Process the command-line options
CommandOption.setSummary (EvaluateTopics.class,
"Estimate the marginal probability of new documents under ");
CommandOption.process (EvaluateTopics.class, args);
if (evaluatorFilename.value == null) {
System.err.println("You must specify a serialized topic evaluator. Use --help to list options.");
System.exit(0);
}
if (inputFile.value == null) {
System.err.println("You must specify a serialized instance list. Use --help to list options.");
System.exit(0);
}
try {
PrintStream docProbabilityStream = null;
if (docProbabilityFile.value != null) {
docProbabilityStream = new PrintStream(docProbabilityFile.value);
}
PrintStream outputStream = System.out;
if (probabilityFile.value != null &&
! probabilityFile.value.equals("-")) {
outputStream = new PrintStream(probabilityFile.value);
}
MarginalProbEstimator evaluator =
MarginalProbEstimator.read(new File(evaluatorFilename.value));
InstanceList instances = InstanceList.load (new File(inputFile.value));
outputStream.println(evaluator.evaluateLeftToRight(instances, numParticles.value,
usingResampling.value,
docProbabilityStream));
} catch (Exception e) {
e.printStackTrace();
System.err.println(e.getMessage());
}
}
}
| 4,302 | 40.776699 | 162 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/tui/TopicTrain.java | package cc.mallet.topics.tui;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.ObjectOutputStream;
import java.io.PrintWriter;
import cc.mallet.topics.LDAStream;
import cc.mallet.types.InstanceList;
import cc.mallet.util.CommandOption;
// This TUI first trains a topic model and then saves the model to a model file.
public class TopicTrain {
	// inputFile holds the training sequences and testFile the test sequences; the two must share the same Alphabets.
static CommandOption.String inputFile = new CommandOption.String
(TopicTrain.class, "input", "FILENAME", true, null,
"The filename from which to read the list of training instances. Use - for stdin. " +
"The instances must be FeatureSequence or FeatureSequenceWithBigrams, not FeatureVector", null);
static CommandOption.String testFile = new CommandOption.String
(TopicTrain.class, "test", "FILENAME", true, null,
"The filename from which to read the list of training instances. Use - for stdin. " +
"The instances must be FeatureSequence or FeatureSequenceWithBigrams, not FeatureVector", null);
static CommandOption.String outputModelFilename = new CommandOption.String
(TopicTrain.class, "output-model", "FILENAME", true, null,
"The filename in which to write the binary topic model at the end of the iterations. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String inputModelFilename = new CommandOption.String
(TopicTrain.class, "input-model", "FILENAME", true, null,
"The filename from which to read the binary topic model to which the --input will be appended, " +
"allowing incremental training. " +
"By default this is null, indicating that no file will be read.", null);
static CommandOption.String stateFile = new CommandOption.String
(TopicTrain.class, "output-state", "FILENAME", true, null,
"The filename in which to write the Gibbs sampling state after at the end of the iterations. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String stateTestFile = new CommandOption.String
(TopicTrain.class, "output-state-test", "FILENAME", true, null,
"The filename in which to write the Gibbs sampling state for test after at the end of the iterations. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String topicKeysFile = new CommandOption.String
(TopicTrain.class, "output-topic-keys", "FILENAME", true, null,
"The filename in which to write the top words for each topic and any Dirichlet parameters. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String topicTypesFile = new CommandOption.String
(TopicTrain.class, "output-type-topics", "FILENAME", true, null,
"The filename in which to write the matrix of phi, the probability of each type/word belonging to each topic " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String docTopicsFile = new CommandOption.String
(TopicTrain.class, "output-doc-topics", "FILENAME", true, null,
"The filename in which to write the topic proportions per document, at the end of the iterations. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String testDocTopicsFile = new CommandOption.String
(TopicTrain.class, "output-testdoc-topics", "inf.theta", true, null,
"The filename in which to write the topic proportions per test document, at the end of the iterations. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String serialTestDocTopicsFile = new CommandOption.String
(TopicTrain.class, "output-testdoc-serial", "inf.theta", true, null,
"The filename in which to write the topic proportions per test document, at the end of the iterations. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.Double docTopicsThreshold = new CommandOption.Double
(TopicTrain.class, "doc-topics-threshold", "DECIMAL", true, 0.0,
"When writing topic proportions per document with --output-doc-topics, " +
"do not print topics with proportions less than this threshold value.", null);
static CommandOption.Integer docTopicsMax = new CommandOption.Integer
(TopicTrain.class, "doc-topics-max", "INTEGER", true, -1,
"When writing topic proportions per document with --output-doc-topics, " +
"do not print more than INTEGER number of topics. "+
"A negative value indicates that all topics should be printed.", null);
static CommandOption.Integer numTopics = new CommandOption.Integer
(TopicTrain.class, "num-topics", "INTEGER", true, 10,
"The number of topics to fit.", null);
static CommandOption.Integer numIterations = new CommandOption.Integer
(TopicTrain.class, "num-iterations", "INTEGER", true, 1000,
"The number of iterations of Gibbs sampling.", null);
static CommandOption.Integer infIterations = new CommandOption.Integer
(TopicTrain.class, "inf-iterations", "INTEGER", true, 1000,
"The number of iterations of Gibbs sampling.", null);
	// Set this option to use inferenceOneByOne (per-document inference).
static CommandOption.Integer infOneByOne = new CommandOption.Integer
(TopicTrain.class, "inf-onebyone", "INTEGER", true, 0,
"The number of iterations of Gibbs sampling.", null);
	// Set this option to run inferenceAll; it is usually not needed.
static CommandOption.Integer infAllIterations = new CommandOption.Integer
(TopicTrain.class, "inf-all-iterations", "INTEGER", true, 0,
"The number of inference iterations of Gibbs sampling.", null);
static CommandOption.Integer randomSeed = new CommandOption.Integer
(TopicTrain.class, "random-seed", "INTEGER", true, 0,
"The random seed for the Gibbs sampler. Default is 0, which will use the clock.", null);
static CommandOption.Integer topWords = new CommandOption.Integer
(TopicTrain.class, "num-top-words", "INTEGER", true, 20,
"The number of most probable words to print for each topic after model estimation.", null);
static CommandOption.Integer showTopicsInterval = new CommandOption.Integer
(TopicTrain.class, "show-topics-interval", "INTEGER", true, 50,
"The number of iterations between printing a brief summary of the topics so far.", null);
static CommandOption.Integer outputModelInterval = new CommandOption.Integer
(TopicTrain.class, "output-model-interval", "INTEGER", true, 0,
"The number of iterations between writing the model (and its Gibbs sampling state) to a binary file. " +
"You must also set the --output-model to use this option, whose argument will be the prefix of the filenames.", null);
static CommandOption.Integer outputStateInterval = new CommandOption.Integer
(TopicTrain.class, "output-state-interval", "INTEGER", true, 0,
"The number of iterations between writing the sampling state to a text file. " +
"You must also set the --output-state to use this option, whose argument will be the prefix of the filenames.", null);
static CommandOption.Double alpha = new CommandOption.Double
(TopicTrain.class, "alpha", "DECIMAL", true, 50.0,
"Alpha parameter: smoothing over topic distribution.",null);
static CommandOption.Double beta = new CommandOption.Double
(TopicTrain.class, "beta", "DECIMAL", true, 0.01,
"Beta parameter: smoothing over unigram distribution.",null);
public static void main(String[] args) throws java.io.IOException
{
// Process the command-line options
CommandOption.setSummary (TopicTrain.class,
"A tool for training and test streamline topic model.");
CommandOption.process (TopicTrain.class, args);
LDAStream lda = null;
if (inputFile.value != null) {
InstanceList instances = InstanceList.load (new File(inputFile.value));
System.out.println ("Training Data loaded.");
lda=new LDAStream(numTopics.value, alpha.value, beta.value);
lda.addInstances(instances);
}
if(testFile.value != null) {
InstanceList testing = InstanceList.load(new File(testFile.value));
lda.setTestingInstances(testing);
}
lda.setTopicDisplay(showTopicsInterval.value, topWords.value);
if (outputModelInterval.value != 0) {
lda.setModelOutput(outputModelInterval.value, outputModelFilename.value);
}
lda.setNumIterations(numIterations.value);
if (randomSeed.value != 0) {
lda.setRandomSeed(randomSeed.value);
}
if (outputStateInterval.value != 0) {
lda.setSaveState(outputStateInterval.value, stateFile.value);
}
lda.estimate();
//save the model, we need typeTopicCounts and tokensPerTopic for empirical likelihood
lda.write(new File (inputFile.value + ".model"));
if (topicKeysFile.value != null) {
lda.printTopWords(new File(topicKeysFile.value), topWords.value, false);
}
if (topicTypesFile.value != null) {
lda.printPhi(new File(topicTypesFile.value), 1e-4);
}
if (stateFile.value != null) {
lda.printState (lda.getData(), new File(stateFile.value));
}
if (docTopicsFile.value != null) {
lda.printDocumentTopics(lda.getData(), new PrintWriter (new FileWriter ((new File(docTopicsFile.value)))),
docTopicsThreshold.value, docTopicsMax.value);
}
}
}
| 9,609 | 46.107843 | 123 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/tui/Vectors2Topics.java | /* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.topics.tui;
import cc.mallet.util.CommandOption;
import cc.mallet.util.Randoms;
import cc.mallet.types.InstanceList;
import cc.mallet.types.FeatureSequence;
import cc.mallet.topics.*;
import java.io.*;
/** Perform topic analysis in the style of LDA and its variants.
* @author <a href="mailto:[email protected]">Andrew McCallum</a>
*/
public class Vectors2Topics {
static CommandOption.String inputFile = new CommandOption.String
(Vectors2Topics.class, "input", "FILENAME", true, null,
"The filename from which to read the list of training instances. Use - for stdin. " +
"The instances must be FeatureSequence or FeatureSequenceWithBigrams, not FeatureVector", null);
static CommandOption.SpacedStrings languageInputFiles = new CommandOption.SpacedStrings
(Vectors2Topics.class, "language-inputs", "FILENAME [FILENAME ...]", true, null,
"Filenames for polylingual topic model. Each language should have its own file, " +
"with the same number of instances in each file. If a document is missing in " +
"one language, there should be an empty instance.", null);
static CommandOption.String testingFile = new CommandOption.String
(Vectors2Topics.class, "testing", "FILENAME", false, null,
"The filename from which to read the list of instances for empirical likelihood calculation. Use - for stdin. " +
"The instances must be FeatureSequence or FeatureSequenceWithBigrams, not FeatureVector", null);
static CommandOption.String outputModelFilename = new CommandOption.String
(Vectors2Topics.class, "output-model", "FILENAME", true, null,
"The filename in which to write the binary topic model at the end of the iterations. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String inputModelFilename = new CommandOption.String
(Vectors2Topics.class, "input-model", "FILENAME", true, null,
"The filename from which to read the binary topic model to which the --input will be appended, " +
"allowing incremental training. " +
"By default this is null, indicating that no file will be read.", null);
static CommandOption.String inferencerFilename = new CommandOption.String
(Vectors2Topics.class, "inferencer-filename", "FILENAME", true, null,
"A topic inferencer applies a previously trained topic model to new documents. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String evaluatorFilename = new CommandOption.String
(Vectors2Topics.class, "evaluator-filename", "FILENAME", true, null,
"A held-out likelihood evaluator for new documents. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String stateFile = new CommandOption.String
(Vectors2Topics.class, "output-state", "FILENAME", true, null,
"The filename in which to write the Gibbs sampling state after at the end of the iterations. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String topicKeysFile = new CommandOption.String
(Vectors2Topics.class, "output-topic-keys", "FILENAME", true, null,
"The filename in which to write the top words for each topic and any Dirichlet parameters. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String topicWordWeightsFile = new CommandOption.String
(Vectors2Topics.class, "topic-word-weights-file", "FILENAME", true, null,
"The filename in which to write unnormalized weights for every topic and word type. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String wordTopicCountsFile = new CommandOption.String
(Vectors2Topics.class, "word-topic-counts-file", "FILENAME", true, null,
"The filename in which to write a sparse representation of topic-word assignments. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String topicReportXMLFile = new CommandOption.String
(Vectors2Topics.class, "xml-topic-report", "FILENAME", true, null,
"The filename in which to write the top words for each topic and any Dirichlet parameters in XML format. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String topicPhraseReportXMLFile = new CommandOption.String
(Vectors2Topics.class, "xml-topic-phrase-report", "FILENAME", true, null,
"The filename in which to write the top words and phrases for each topic and any Dirichlet parameters in XML format. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.String docTopicsFile = new CommandOption.String
(Vectors2Topics.class, "output-doc-topics", "FILENAME", true, null,
"The filename in which to write the topic proportions per document, at the end of the iterations. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.Double docTopicsThreshold = new CommandOption.Double
(Vectors2Topics.class, "doc-topics-threshold", "DECIMAL", true, 0.0,
"When writing topic proportions per document with --output-doc-topics, " +
"do not print topics with proportions less than this threshold value.", null);
static CommandOption.Integer docTopicsMax = new CommandOption.Integer
(Vectors2Topics.class, "doc-topics-max", "INTEGER", true, -1,
"When writing topic proportions per document with --output-doc-topics, " +
"do not print more than INTEGER number of topics. "+
"A negative value indicates that all topics should be printed.", null);
static CommandOption.Integer numTopics = new CommandOption.Integer
(Vectors2Topics.class, "num-topics", "INTEGER", true, 10,
"The number of topics to fit.", null);
static CommandOption.Integer numThreads = new CommandOption.Integer
(Vectors2Topics.class, "num-threads", "INTEGER", true, 1,
"The number of threads for parallel training.", null);
static CommandOption.Integer numIterations = new CommandOption.Integer
(Vectors2Topics.class, "num-iterations", "INTEGER", true, 1000,
"The number of iterations of Gibbs sampling.", null);
static CommandOption.Integer randomSeed = new CommandOption.Integer
(Vectors2Topics.class, "random-seed", "INTEGER", true, 0,
"The random seed for the Gibbs sampler. Default is 0, which will use the clock.", null);
static CommandOption.Integer topWords = new CommandOption.Integer
(Vectors2Topics.class, "num-top-words", "INTEGER", true, 20,
"The number of most probable words to print for each topic after model estimation.", null);
static CommandOption.Integer showTopicsInterval = new CommandOption.Integer
(Vectors2Topics.class, "show-topics-interval", "INTEGER", true, 50,
"The number of iterations between printing a brief summary of the topics so far.", null);
static CommandOption.Integer outputModelInterval = new CommandOption.Integer
(Vectors2Topics.class, "output-model-interval", "INTEGER", true, 0,
"The number of iterations between writing the model (and its Gibbs sampling state) to a binary file. " +
"You must also set the --output-model to use this option, whose argument will be the prefix of the filenames.", null);
static CommandOption.Integer outputStateInterval = new CommandOption.Integer
(Vectors2Topics.class, "output-state-interval", "INTEGER", true, 0,
"The number of iterations between writing the sampling state to a text file. " +
"You must also set the --output-state to use this option, whose argument will be the prefix of the filenames.", null);
static CommandOption.Integer optimizeInterval = new CommandOption.Integer
(Vectors2Topics.class, "optimize-interval", "INTEGER", true, 0,
"The number of iterations between reestimating dirichlet hyperparameters.", null);
static CommandOption.Integer optimizeBurnIn = new CommandOption.Integer
(Vectors2Topics.class, "optimize-burn-in", "INTEGER", true, 200,
"The number of iterations to run before first estimating dirichlet hyperparameters.", null);
static CommandOption.Boolean useSymmetricAlpha = new CommandOption.Boolean
(Vectors2Topics.class, "use-symmetric-alpha", "true|false", false, false,
"Only optimize the concentration parameter of the prior over document-topic distributions. This may reduce the number of very small, poorly estimated topics, but may disperse common words over several topics.", null);
static CommandOption.Boolean useNgrams = new CommandOption.Boolean
(Vectors2Topics.class, "use-ngrams", "true|false", false, false,
"Rather than using LDA, use Topical-N-Grams, which models phrases.", null);
static CommandOption.Boolean usePAM = new CommandOption.Boolean
(Vectors2Topics.class, "use-pam", "true|false", false, false,
"Rather than using LDA, use Pachinko Allocation Model, which models topical correlations." +
"You cannot do this and also --use-ngrams.", null);
static CommandOption.Double alpha = new CommandOption.Double
(Vectors2Topics.class, "alpha", "DECIMAL", true, 50.0,
"Alpha parameter: smoothing over topic distribution.",null);
static CommandOption.Double beta = new CommandOption.Double
(Vectors2Topics.class, "beta", "DECIMAL", true, 0.01,
"Beta parameter: smoothing over unigram distribution.",null);
static CommandOption.Double gamma = new CommandOption.Double
(Vectors2Topics.class, "gamma", "DECIMAL", true, 0.01,
"Gamma parameter: smoothing over bigram distribution",null);
static CommandOption.Double delta = new CommandOption.Double
(Vectors2Topics.class, "delta", "DECIMAL", true, 0.03,
"Delta parameter: smoothing over choice of unigram/bigram",null);
static CommandOption.Double delta1 = new CommandOption.Double
(Vectors2Topics.class, "delta1", "DECIMAL", true, 0.2,
"Topic N-gram smoothing parameter",null);
static CommandOption.Double delta2 = new CommandOption.Double
(Vectors2Topics.class, "delta2", "DECIMAL", true, 1000.0,
"Topic N-gram smoothing parameter",null);
static CommandOption.Integer pamNumSupertopics = new CommandOption.Integer
(Vectors2Topics.class, "pam-num-supertopics", "INTEGER", true, 10,
"When using the Pachinko Allocation Model (PAM) set the number of supertopics. " +
"Typically this is about half the number of subtopics, although more may help.", null);
static CommandOption.Integer pamNumSubtopics = new CommandOption.Integer
(Vectors2Topics.class, "pam-num-subtopics", "INTEGER", true, 20,
"When using the Pachinko Allocation Model (PAM) set the number of subtopics.", null);
public static void main (String[] args) throws java.io.IOException
{
// Process the command-line options
CommandOption.setSummary (Vectors2Topics.class,
"A tool for estimating, saving and printing diagnostics for topic models, such as LDA.");
CommandOption.process (Vectors2Topics.class, args);
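		// Dispatch on the requested model: Pachinko Allocation (PAM), Topical N-Grams,
		// a polylingual topic model, or (by default) standard parallel LDA.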
if (usePAM.value) {
InstanceList ilist = InstanceList.load (new File(inputFile.value));
System.out.println ("Data loaded.");
if (inputModelFilename.value != null)
throw new IllegalArgumentException ("--input-model not supported with --use-pam.");
PAM4L pam = new PAM4L(pamNumSupertopics.value, pamNumSubtopics.value);
pam.estimate (ilist, numIterations.value, /*optimizeModelInterval*/50,
showTopicsInterval.value,
outputModelInterval.value, outputModelFilename.value,
randomSeed.value == 0 ? new Randoms() : new Randoms(randomSeed.value));
pam.printTopWords(topWords.value, true);
if (stateFile.value != null)
pam.printState (new File(stateFile.value));
if (docTopicsFile.value != null) {
PrintWriter out = new PrintWriter (new FileWriter ((new File(docTopicsFile.value))));
pam.printDocumentTopics (out, docTopicsThreshold.value, docTopicsMax.value);
out.close();
}
if (outputModelFilename.value != null) {
assert (pam != null);
try {
ObjectOutputStream oos = new ObjectOutputStream (new FileOutputStream (outputModelFilename.value));
oos.writeObject (pam);
oos.close();
} catch (Exception e) {
e.printStackTrace();
throw new IllegalArgumentException ("Couldn't write topic model to filename "+outputModelFilename.value);
}
}
}
else if (useNgrams.value) {
InstanceList ilist = InstanceList.load (new File(inputFile.value));
System.out.println ("Data loaded.");
if (inputModelFilename.value != null)
throw new IllegalArgumentException ("--input-model not supported with --use-ngrams.");
TopicalNGrams tng = new TopicalNGrams(numTopics.value,
alpha.value,
beta.value,
gamma.value,
delta.value,
delta1.value,
delta2.value);
tng.estimate (ilist, numIterations.value, showTopicsInterval.value,
outputModelInterval.value, outputModelFilename.value,
randomSeed.value == 0 ? new Randoms() : new Randoms(randomSeed.value));
tng.printTopWords(topWords.value, true);
if (stateFile.value != null)
tng.printState (new File(stateFile.value));
if (docTopicsFile.value != null) {
PrintWriter out = new PrintWriter (new FileWriter ((new File(docTopicsFile.value))));
tng.printDocumentTopics (out, docTopicsThreshold.value, docTopicsMax.value);
out.close();
}
if (outputModelFilename.value != null) {
assert (tng != null);
try {
ObjectOutputStream oos = new ObjectOutputStream (new FileOutputStream (outputModelFilename.value));
oos.writeObject (tng);
oos.close();
} catch (Exception e) {
e.printStackTrace();
throw new IllegalArgumentException ("Couldn't write topic model to filename "+outputModelFilename.value);
}
}
}
else if (languageInputFiles.value != null) {
// Start a new polylingual topic model
PolylingualTopicModel topicModel = null;
InstanceList[] training = new InstanceList[ languageInputFiles.value.length ];
for (int i=0; i < training.length; i++) {
training[i] = InstanceList.load(new File(languageInputFiles.value[i]));
if (training[i] != null) { System.out.println(i + " is not null"); }
else { System.out.println(i + " is null"); }
}
System.out.println ("Data loaded.");
// For historical reasons we currently only support FeatureSequence data,
// not the FeatureVector, which is the default for the input functions.
// Provide a warning to avoid ClassCastExceptions.
if (training[0].size() > 0 &&
training[0].get(0) != null) {
Object data = training[0].get(0).getData();
if (! (data instanceof FeatureSequence)) {
System.err.println("Topic modeling currently only supports feature sequences: use --keep-sequence option when importing data.");
System.exit(1);
}
}
topicModel = new PolylingualTopicModel (numTopics.value, alpha.value);
if (randomSeed.value != 0) {
topicModel.setRandomSeed(randomSeed.value);
}
topicModel.addInstances(training);
topicModel.setTopicDisplay(showTopicsInterval.value, topWords.value);
topicModel.setNumIterations(numIterations.value);
topicModel.setOptimizeInterval(optimizeInterval.value);
topicModel.setBurninPeriod(optimizeBurnIn.value);
if (outputStateInterval.value != 0) {
topicModel.setSaveState(outputStateInterval.value, stateFile.value);
}
if (outputModelInterval.value != 0) {
topicModel.setModelOutput(outputModelInterval.value, outputModelFilename.value);
}
topicModel.estimate();
if (topicKeysFile.value != null) {
topicModel.printTopWords(new File(topicKeysFile.value), topWords.value, false);
}
if (stateFile.value != null) {
topicModel.printState (new File(stateFile.value));
}
if (docTopicsFile.value != null) {
PrintWriter out = new PrintWriter (new FileWriter ((new File(docTopicsFile.value))));
topicModel.printDocumentTopics(out, docTopicsThreshold.value, docTopicsMax.value);
out.close();
}
if (outputModelFilename.value != null) {
assert (topicModel != null);
try {
ObjectOutputStream oos =
new ObjectOutputStream (new FileOutputStream (outputModelFilename.value));
oos.writeObject (topicModel);
oos.close();
} catch (Exception e) {
e.printStackTrace();
throw new IllegalArgumentException ("Couldn't write topic model to filename "+outputModelFilename.value);
}
}
}
else {
// Start a new LDA topic model
ParallelTopicModel topicModel = null;
if (inputModelFilename.value != null) {
try {
topicModel = ParallelTopicModel.read(new File(inputModelFilename.value));
} catch (Exception e) {
System.err.println("Unable to restore saved topic model " +
inputModelFilename.value + ": " + e);
System.exit(1);
}
/*
// Loading new data is optional if we are restoring a saved state.
if (inputFile.value != null) {
InstanceList instances = InstanceList.load (new File(inputFile.value));
System.out.println ("Data loaded.");
lda.addInstances(instances);
}
*/
}
else {
InstanceList training = InstanceList.load (new File(inputFile.value));
System.out.println ("Data loaded.");
if (training.size() > 0 &&
training.get(0) != null) {
Object data = training.get(0).getData();
if (! (data instanceof FeatureSequence)) {
System.err.println("Topic modeling currently only supports feature sequences: use --keep-sequence option when importing data.");
System.exit(1);
}
}
topicModel = new ParallelTopicModel (numTopics.value, alpha.value, beta.value);
if (randomSeed.value != 0) {
topicModel.setRandomSeed(randomSeed.value);
}
topicModel.addInstances(training);
}
topicModel.setTopicDisplay(showTopicsInterval.value, topWords.value);
/*
if (testingFile.value != null) {
topicModel.setTestingInstances( InstanceList.load(new File(testingFile.value)) );
}
*/
topicModel.setNumIterations(numIterations.value);
topicModel.setOptimizeInterval(optimizeInterval.value);
topicModel.setBurninPeriod(optimizeBurnIn.value);
topicModel.setSymmetricAlpha(useSymmetricAlpha.value);
if (outputStateInterval.value != 0) {
topicModel.setSaveState(outputStateInterval.value, stateFile.value);
}
if (outputModelInterval.value != 0) {
topicModel.setSaveSerializedModel(outputModelInterval.value, outputModelFilename.value);
}
topicModel.setNumThreads(numThreads.value);
topicModel.estimate();
if (topicKeysFile.value != null) {
topicModel.printTopWords(new File(topicKeysFile.value), topWords.value, false);
}
if (topicReportXMLFile.value != null) {
PrintWriter out = new PrintWriter(topicReportXMLFile.value);
topicModel.topicXMLReport(out, topWords.value);
out.close();
}
if (topicPhraseReportXMLFile.value != null) {
PrintWriter out = new PrintWriter(topicPhraseReportXMLFile.value);
topicModel.topicPhraseXMLReport(out, topWords.value);
out.close();
}
if (stateFile.value != null) {
topicModel.printState (new File(stateFile.value));
}
if (docTopicsFile.value != null) {
PrintWriter out = new PrintWriter (new FileWriter ((new File(docTopicsFile.value))));
topicModel.printDocumentTopics(out, docTopicsThreshold.value, docTopicsMax.value);
out.close();
}
if (topicWordWeightsFile.value != null) {
topicModel.printTopicWordWeights(new File (topicWordWeightsFile.value));
}
if (wordTopicCountsFile.value != null) {
topicModel.printTypeTopicCounts(new File (wordTopicCountsFile.value));
}
if (outputModelFilename.value != null) {
assert (topicModel != null);
try {
ObjectOutputStream oos =
new ObjectOutputStream (new FileOutputStream (outputModelFilename.value));
oos.writeObject (topicModel);
oos.close();
} catch (Exception e) {
e.printStackTrace();
throw new IllegalArgumentException ("Couldn't write topic model to filename "+outputModelFilename.value);
}
}
if (inferencerFilename.value != null) {
try {
ObjectOutputStream oos =
new ObjectOutputStream(new FileOutputStream(inferencerFilename.value));
oos.writeObject(topicModel.getInferencer());
oos.close();
} catch (Exception e) {
System.err.println(e.getMessage());
}
}
if (evaluatorFilename.value != null) {
try {
ObjectOutputStream oos =
new ObjectOutputStream(new FileOutputStream(evaluatorFilename.value));
oos.writeObject(topicModel.getProbEstimator());
oos.close();
} catch (Exception e) {
System.err.println(e.getMessage());
}
}
}
}
}
| 21,513 | 41.60198 | 220 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/tui/DMRLoader.java | package cc.mallet.topics.tui;
import cc.mallet.classify.*;
import cc.mallet.types.*;
import cc.mallet.pipe.*;
import java.util.logging.*;
import java.util.*;
import java.util.zip.*;
import java.io.*;
import gnu.trove.*;
/**
* This class loads data into the format for the MALLET
* Dirichlet-multinomial regression (DMR). DMR topic models
* learn topic assignments conditioned on observed features.
* <p>
* The input format consists of two files, one for text and
* the other for features. The "text" file consists of one document
* per line. This class will tokenize and remove stopwords.
* <p>
* The "features" file contains whitespace-delimited features in this format:
* <code>blue heavy width=12.08</code>
* Features without explicit values ("blue" and "heavy" in the example) are set to 1.0.
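 * <p>
 * A minimal usage sketch (file names here are hypothetical):
 * <pre>
 * java -cp mallet.jar cc.mallet.topics.tui.DMRLoader words.txt features.txt training.dmr
 * </pre>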
*/
public class DMRLoader implements Serializable {
public static BufferedReader openReader(File file) throws IOException {
BufferedReader reader = null;
if (file.toString().endsWith(".gz")) {
reader = new BufferedReader(new InputStreamReader(new GZIPInputStream(new FileInputStream(file))));
}
else {
reader = new BufferedReader(new FileReader (file));
}
return reader;
}
public void load(File wordsFile, File featuresFile, File instancesFile) throws IOException, FileNotFoundException {
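		// Build the import pipe: parse the target string into features, tokenize the text,
		// lowercase, remove stopwords, and map tokens to feature-sequence indices.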
Pipe instancePipe =
new SerialPipes (new Pipe[] {
(Pipe) new TargetStringToFeatures(),
(Pipe) new CharSequence2TokenSequence(),
(Pipe) new TokenSequenceLowercase(),
(Pipe) new TokenSequenceRemoveStopwords(false, false),
(Pipe) new TokenSequence2FeatureSequence()
});
InstanceList instances = new InstanceList (instancePipe);
ArrayList<Instance> instanceBuffer = new ArrayList<Instance>();
BufferedReader wordsReader = openReader(wordsFile);
BufferedReader featuresReader = openReader(featuresFile);
int lineNumber = 1;
String wordsLine = null;
String featuresLine = null;
while ((wordsLine = wordsReader.readLine()) != null) {
if ((featuresLine = featuresReader.readLine()) == null) {
System.err.println("ran out of features");
System.exit(0);
}
if (featuresLine.equals("")) { continue; }
instanceBuffer.add(new Instance(wordsLine, featuresLine, String.valueOf(lineNumber), null));
lineNumber++;
}
instances.addThruPipe(instanceBuffer.iterator());
ObjectOutputStream oos =
new ObjectOutputStream(new BufferedOutputStream(new FileOutputStream(instancesFile)));
oos.writeObject(instances);
oos.close();
}
public static void main (String[] args) throws FileNotFoundException, IOException {
if (args.length != 3) {
System.err.println("Usage: DMRLoader [words file] [features file] [instances file]");
System.exit(0);
}
File wordsFile = new File(args[0]);
File featuresFile = new File(args[1]);
File instancesFile = new File(args[2]);
DMRLoader loader = new DMRLoader();
loader.load(wordsFile, featuresFile, instancesFile);
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
}
| 3,214 | 29.046729 | 116 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/tui/InferTopics.java | package cc.mallet.topics.tui;
import cc.mallet.util.*;
import cc.mallet.types.*;
import cc.mallet.topics.*;
import java.io.*;
public class InferTopics {
static CommandOption.String inferencerFilename = new CommandOption.String
(InferTopics.class, "inferencer", "FILENAME", true, null,
"A serialized topic inferencer from a trained topic model.\n" +
"By default this is null, indicating that no file will be read.", null);
static CommandOption.String inputFile = new CommandOption.String
(InferTopics.class, "input", "FILENAME", true, null,
"The filename from which to read the list of instances\n" +
"for which topics should be inferred. Use - for stdin. " +
"The instances must be FeatureSequence or FeatureSequenceWithBigrams, not FeatureVector", null);
static CommandOption.String docTopicsFile = new CommandOption.String
(InferTopics.class, "output-doc-topics", "FILENAME", true, null,
"The filename in which to write the inferred topic\n" +
"proportions per document. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.Double docTopicsThreshold = new CommandOption.Double
(InferTopics.class, "doc-topics-threshold", "DECIMAL", true, 0.0,
"When writing topic proportions per document with --output-doc-topics, " +
"do not print topics with proportions less than this threshold value.", null);
static CommandOption.Integer docTopicsMax = new CommandOption.Integer
(InferTopics.class, "doc-topics-max", "INTEGER", true, -1,
"When writing topic proportions per document with --output-doc-topics, " +
"do not print more than INTEGER number of topics. "+
"A negative value indicates that all topics should be printed.", null);
static CommandOption.Integer numIterations = new CommandOption.Integer
(InferTopics.class, "num-iterations", "INTEGER", true, 100,
"The number of iterations of Gibbs sampling.", null);
static CommandOption.Integer sampleInterval = new CommandOption.Integer
(InferTopics.class, "sample-interval", "INTEGER", true, 10,
"The number of iterations between saved samples.", null);
static CommandOption.Integer burnInIterations = new CommandOption.Integer
(InferTopics.class, "burn-in", "INTEGER", true, 10,
"The number of iterations before the first sample is saved.", null);
static CommandOption.Integer randomSeed = new CommandOption.Integer
(InferTopics.class, "random-seed", "INTEGER", true, 0,
"The random seed for the Gibbs sampler. Default is 0, which will use the clock.", null);
public static void main (String[] args) {
// Process the command-line options
CommandOption.setSummary (InferTopics.class,
"Use an existing topic model to infer topic distributions for new documents");
CommandOption.process (InferTopics.class, args);
if (inferencerFilename.value == null) {
System.err.println("You must specify a serialized topic inferencer. Use --help to list options.");
System.exit(0);
}
if (inputFile.value == null) {
System.err.println("You must specify a serialized instance list. Use --help to list options.");
System.exit(0);
}
try {
TopicInferencer inferencer =
TopicInferencer.read(new File(inferencerFilename.value));
InstanceList instances = InstanceList.load (new File(inputFile.value));
if (docTopicsFile.value != null) {
inferencer.writeInferredDistributions(instances, new File(docTopicsFile.value),
numIterations.value, sampleInterval.value,
burnInIterations.value,
docTopicsThreshold.value, docTopicsMax.value);
}
} catch (Exception e) {
e.printStackTrace();
System.err.println(e.getMessage());
}
}
}
| 3,924 | 42.131868 | 118 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/examples/SimpleGraphExample.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.examples;
import java.util.Random;
import cc.mallet.grmm.inference.Inferencer;
import cc.mallet.grmm.inference.JunctionTreeInferencer;
import cc.mallet.grmm.types.*;
/**
* Created: Aug 13, 2004
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: SimpleGraphExample.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $
*/
public class SimpleGraphExample {
public static void main (String[] args)
{
// STEP 1: Create the graph
Variable[] allVars = {
new Variable (2),
new Variable (2),
new Variable (2),
new Variable (2)
};
FactorGraph mdl = new FactorGraph (allVars);
// Create a diamond graph, with random potentials
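// Each factor over two binary variables stores four values in row-major
// order: (v1=0,v2=0), (v1=0,v2=1), (v1=1,v2=0), (v1=1,v2=1).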
Random r = new Random (42);
for (int i = 0; i < allVars.length; i++) {
double[] ptlarr = new double [4];
for (int j = 0; j < ptlarr.length; j++)
ptlarr[j] = Math.abs (r.nextDouble ());
Variable v1 = allVars[i];
Variable v2 = allVars[(i + 1) % allVars.length];
mdl.addFactor (v1, v2, ptlarr);
}
// STEP 2: Compute marginals
Inferencer inf = new JunctionTreeInferencer ();
inf.computeMarginals (mdl);
// STEP 3: Collect the results
// We'll just print them out
for (int varnum = 0; varnum < allVars.length; varnum++) {
Variable var = allVars[varnum];
Factor ptl = inf.lookupMarginal (var);
for (AssignmentIterator it = ptl.assignmentIterator (); it.hasNext ();) {
int outcome = it.indexOfCurrentAssn ();
System.out.println (var+" "+outcome+" "+ptl.value (it));
}
System.out.println ();
}
}
}
| 2,080 | 28.309859 | 81 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/examples/CrossTemplate1.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.examples;
import cc.mallet.grmm.learning.ACRF;
import cc.mallet.grmm.types.Variable;
import cc.mallet.grmm.util.LabelsAssignment;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.FeatureVectorSequence;
/**
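 * An example ACRF.SequenceTemplate that, for each time step t before the last,
 * adds a clique connecting the label at level <tt>lvl1</tt>, time t with the
 * label at level <tt>lvl2</tt>, time t+1.
 *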
* $Id: CrossTemplate1.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $
*/
public class CrossTemplate1 extends ACRF.SequenceTemplate {
private int lvl1 = 0;
private int lvl2 = 1;
public CrossTemplate1 (int lvl1, int lvl2)
{
this.lvl1 = lvl1;
this.lvl2 = lvl2;
}
protected void addInstantiatedCliques (ACRF.UnrolledGraph graph, FeatureVectorSequence fvs, LabelsAssignment lblseq)
{
for (int t = 0; t < lblseq.size() - 1; t++) {
Variable var1 = lblseq.varOfIndex (t, lvl1);
Variable var2 = lblseq.varOfIndex (t + 1, lvl2);
assert var1 != null : "Couldn't get label factor "+lvl1+" time "+t;
assert var2 != null : "Couldn't get label factor "+lvl2+" time "+(t+1);
Variable[] vars = new Variable[] { var1, var2 };
FeatureVector fv = fvs.getFeatureVector (t);
ACRF.UnrolledVarSet vs = new ACRF.UnrolledVarSet (graph, this, vars, fv);
graph.addClique (vs);
}
}
}
| 1,600 | 34.577778 | 118 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/examples/SimpleFactorExample.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.examples;
import cc.mallet.grmm.types.*;
/**
* A simple example to demonstrate the row-major indexing of potential values.
*
* Created: Aug 30, 2004
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: SimpleFactorExample.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $
*/
public class SimpleFactorExample {
public static void main (String[] args)
{
FactorGraph mdl = new FactorGraph ();
Variable[] vars = new Variable [] {
new Variable (2),
new Variable (2),
new Variable (3),
new Variable (2),
new Variable (2),
};
/* Create an edge potential looking like
VARS[0] VARS[1] VALUE
0 0 0.6
0 1 1.3
1 0 0.3
1 1 2.3
*/
double[] arr = new double[] { 0.6, 1.3, 0.3, 2.3, };
mdl.addFactor (vars[0], vars[1], arr);
System.out.println ("Model with one edge potential:");
mdl.dump ();
/* Add a three-clique potential whose values are
VARS[2] VARS[3] VARS[4] VALUE
0 0 0 1
0 0 1 2
0 1 0 3
0 1 1 4
1 0 0 11
1 0 1 12
1 1 0 13
1 1 1 14
2 0 0 21
2 0 1 22
2 1 0 23
2 1 1 24
*/
double[] arr2 = { 1, 2, 3, 4, 11, 12, 13, 14, 21, 22, 23, 24 };
VarSet varSet = new HashVarSet (new Variable[] { vars[2], vars[3], vars[4] });
Factor ptl = new TableFactor (varSet, arr2);
mdl.addFactor (ptl);
System.out.println ("Model with a 3-clique added:");
mdl.dump ();
}
}
| 2,438 | 34.867647 | 84 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/examples/SimpleCrfExample.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.examples;
import java.io.File;
import java.io.FileReader;
import java.io.FileNotFoundException;
import java.util.regex.Pattern;
import cc.mallet.grmm.learning.ACRF;
import cc.mallet.grmm.learning.ACRFTrainer;
import cc.mallet.grmm.learning.DefaultAcrfTrainer;
import cc.mallet.grmm.learning.GenericAcrfData2TokenSequence;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.SerialPipes;
import cc.mallet.pipe.TokenSequence2FeatureVectorSequence;
import cc.mallet.pipe.iterator.LineGroupIterator;
import cc.mallet.types.InstanceList;
import cc.mallet.util.FileUtils;
/**
* $Id: SimpleCrfExample.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $
*/
public class SimpleCrfExample {
public static void main (String[] args) throws FileNotFoundException
{
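    // Expected arguments: <training file> <testing file> <output ACRF file>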
File trainFile = new File (args[0]);
File testFile = new File (args[1]);
File crfFile = new File (args[2]);
Pipe pipe = new SerialPipes (new Pipe[] {
new GenericAcrfData2TokenSequence (2),
new TokenSequence2FeatureVectorSequence (true, true),
});
InstanceList training = new InstanceList (pipe);
training.addThruPipe (new LineGroupIterator (new FileReader (trainFile),
Pattern.compile ("\\s*"),
true));
InstanceList testing = new InstanceList (pipe);
    testing.addThruPipe (new LineGroupIterator (new FileReader (testFile),
Pattern.compile ("\\s*"),
true));
ACRF.Template[] tmpls = new ACRF.Template[] {
new ACRF.BigramTemplate (0),
new ACRF.BigramTemplate (1),
new ACRF.PairwiseFactorTemplate (0,1),
new CrossTemplate1 (0,1)
};
ACRF acrf = new ACRF (pipe, tmpls);
ACRFTrainer trainer = new DefaultAcrfTrainer ();
trainer.train (acrf, training, null, testing, 99999);
FileUtils.writeGzippedObject (crfFile, acrf);
}
}
| 2,410 | 34.455882 | 76 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/ACRFTrainer.java | package cc.mallet.grmm.learning;
import cc.mallet.optimize.Optimizable;
import cc.mallet.types.InstanceList;
/**
* $Id: ACRFTrainer.java,v 1.1 2007/10/22 21:37:43 mccallum Exp $
*/
public interface ACRFTrainer {
boolean train (ACRF acrf, InstanceList training);
boolean train (ACRF acrf, InstanceList training, int numIter);
boolean train (ACRF acrf, InstanceList training, ACRFEvaluator eval, int numIter);
boolean train (ACRF acrf,
InstanceList training,
InstanceList validation,
InstanceList testing,
int numIter);
boolean train (ACRF acrf,
InstanceList training,
InstanceList validation,
InstanceList testing,
ACRFEvaluator eval,
int numIter);
boolean train (ACRF acrf,
InstanceList training,
InstanceList validation,
InstanceList testing,
ACRFEvaluator eval,
int numIter,
Optimizable.ByGradientValue macrf);
}
| 1,092 | 27.763158 | 84 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/ACRF.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.learning;
import java.util.logging.Logger;
import java.io.*;
import java.util.*;
import java.util.regex.Pattern;
import gnu.trove.*;
import org.jdom.Document;
import org.jdom.Element;
import org.jdom.JDOMException;
import org.jdom.input.SAXBuilder;
import cc.mallet.grmm.inference.*;
import cc.mallet.grmm.types.*;
import cc.mallet.grmm.util.LabelsAssignment;
import cc.mallet.grmm.util.Models;
import cc.mallet.optimize.Optimizable;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.*;
import cc.mallet.util.ArrayUtils;
import cc.mallet.util.MalletLogger;
/**
* Class for Arbitrary CRFs. These are CRFs with completely
* arbitrary graphical structure. The user passes in a list
 * of instances of ACRF.Template, which get to look at
 * the sequence and decide which cliques to instantiate.
*
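 * <p>
 * A minimal usage sketch (adapted from SimpleCrfExample in the examples
 * package; it assumes a pipe producing FeatureVectorSequence data with
 * LabelsAssignment targets, plus InstanceLists <code>training</code> and
 * <code>testing</code>):
 * <pre>
 *   ACRF.Template[] tmpls = new ACRF.Template[] {
 *       new ACRF.BigramTemplate (0),
 *       new ACRF.BigramTemplate (1),
 *       new ACRF.PairwiseFactorTemplate (0, 1)
 *   };
 *   ACRF acrf = new ACRF (pipe, tmpls);
 *   new DefaultAcrfTrainer ().train (acrf, training, null, testing, numIter);
 * </pre>
 *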
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version $Id: ACRF.java,v 1.1 2007/10/22 21:37:43 mccallum Exp $
*/
public class ACRF implements Serializable {
transient private static Logger logger = MalletLogger.getLogger (ACRF.class.getName());
Template[] templates;
List fixedPtls = new ArrayList (0);
private GraphPostProcessor graphProcessor;
Alphabet inputAlphabet;
private Inferencer globalInferencer = new TRP();
private Inferencer viterbi = TRP.createForMaxProduct ();
int defaultFeatureIndex;
private Pipe inputPipe;
private boolean cacheUnrolledGraphs = false;
transient private Map graphCache = new THashMap ();
private double gaussianPriorVariance = DEFAULT_GAUSSIAN_PRIOR_VARIANCE;
private static final double DEFAULT_GAUSSIAN_PRIOR_VARIANCE = 10.0;
private boolean doSizeScale = false;
/**
* Create a ACRF for a 1-d sequence. Needs an array
* of Templates.
*/
public ACRF (Pipe inputPipe, Template[] tmpls)
throws IllegalArgumentException
{
this.inputPipe = inputPipe;
this.templates = tmpls;
this.inputAlphabet = inputPipe.getDataAlphabet();
this.defaultFeatureIndex = inputAlphabet.size ();
for (int tidx = 0; tidx < templates.length; tidx++) templates [tidx].index = tidx;
}
// Accessors
public Alphabet getInputAlphabet () { return inputAlphabet; }
public int getDefaultFeatureIndex () { return defaultFeatureIndex; }
public Inferencer getInferencer () { return globalInferencer; }
public void setInferencer (Inferencer inf) { globalInferencer = inf; }
public Inferencer getViterbiInferencer () { return viterbi; }
public void setViterbiInferencer (Inferencer inf) { viterbi = inf; }
public boolean isDoSizeScale ()
{
return doSizeScale;
}
public void setDoSizeScale (boolean doSizeScale)
{
this.doSizeScale = doSizeScale;
}
/**
* Sets all templates of this ACRF to use supported features only.
* @param b If true, all templates will use supported features only. Otherwise, all unsupported features will be used.
*/
public void setSupportedOnly (boolean b)
{
for (int i = 0; i < templates.length; i++) {
templates[i].setSupportedOnly (b);
}
}
public boolean isCacheUnrolledGraphs () { return cacheUnrolledGraphs; }
public void setCacheUnrolledGraphs (boolean cacheUnrolledGraphs) { this.cacheUnrolledGraphs = cacheUnrolledGraphs; }
public void setFixedPotentials (Template[] fixed) {
this.fixedPtls = java.util.Arrays.asList (fixed);
for (int tidx = 0; tidx < fixed.length; tidx++) fixed [tidx].index = -1;
}
public void addFixedPotentials (Template[] tmpls) {
for (int i = 0; i < tmpls.length; i++) {
Template tmpl = tmpls[i];
tmpl.setTrainable (false);
fixedPtls.add (tmpl);
tmpl.index = -1;
}
}
public Template[] getTemplates () { return templates; }
public Pipe getInputPipe () { return inputPipe; }
public Template[] getFixedTemplates ()
{
return (Template[]) fixedPtls.toArray (new Template [fixedPtls.size()]);
}
public void addFixedPotential (Template tmpl)
{
tmpl.setTrainable (false);
fixedPtls.add (tmpl);
tmpl.index = -1;
}
public double getGaussianPriorVariance ()
{
return gaussianPriorVariance;
}
public void setGaussianPriorVariance (double gaussianPriorVariance)
{
this.gaussianPriorVariance = gaussianPriorVariance;
}
public void setGraphProcessor (GraphPostProcessor graphProcessor)
{
this.graphProcessor = graphProcessor;
}
/**
* Interface for making global transformations to an unrolled graph after it has been generated.
* For example, directed models can be simulated by selectively normalizing potentials.
*/
public static interface GraphPostProcessor extends Serializable {
void process (UnrolledGraph graph, Instance inst);
}
/**
* A type of clique in the model. Each type of clique is assumed
* to have the same number of possible outcomes and the same set
	 * of weights.
*/
// TODO Make an interface, implement with LogLinearTemplate & FixedTemplate
public abstract static class Template implements Serializable
{
private static final double SOME_UNSUPPORTED_THRESHOLD = 0.1;
private boolean unsupportedWeightsAdded = false;
/**
		 * Adds all instantiated cliques for an instance. This method is
* called as a graph is being unrolled for an instance.
*
* @param graph The graph that the cliques will be added to.
		 * @param instance Instance to unroll graph for. Subclasses are free
* to specify what types they expect in the Instance's slots.
*/
public abstract void addInstantiatedCliques (UnrolledGraph graph, Instance instance);
/**
* Modifies a factor computed from this template. This is useful for templates that
* wish to implement special normalization, etc. The default implementation does nothing.
* <P>
* WARNING: If you implement this method, it is likely that you will change the derivative of
* this factor with respect to weights[]. This means that you will not be able to use the
* default <tt>ACRFTrainer</tt> for this template.
*
* @param unrolledGraph The graph in which the factor sits
* @param clique The set of nodes which are the domain of the factor
* @param ptl The factor to modify
*/
protected void modifyPotential (UnrolledGraph unrolledGraph, UnrolledVarSet clique, AbstractTableFactor ptl) {}
protected SparseVector[] weights;
private BitSet assignmentsPresent;
private boolean supportedOnly = true;
protected boolean isSupportedOnly ()
{
return supportedOnly;
}
/**
* Sets whether this template will use supported features only.
*/
void setSupportedOnly (boolean supportedOnly)
{
this.supportedOnly = supportedOnly;
}
public boolean isUnsupportedWeightsAdded ()
{
return unsupportedWeightsAdded;
}
protected BitSet getAssignmentsPresent ()
{
return assignmentsPresent;
}
/**
* Returns the weights for this clique template. Each possible
* assignment to the clique can in general have a different set of
* weights ,so this method returns an array of SparseVectors w,
* where w[i] are the weights for assignment i.
*/
public SparseVector[] getWeights () { return weights; }
public void setWeights (SparseVector[] w) {
if ((weights != null) && w.length != weights.length) {
throw new IllegalArgumentException ("Weights length changed; was "+weights.length+" now is "+w.length);
}
weights = w;
}
/**
* Initializes the weight vectors to the appropriate size for a
* set of training data.
* @return Number of weights created.
*/
public int initWeights (InstanceList training)
{
logger.info ("Template "+this+" : weights "+(supportedOnly ? "with NO" : "with ALL" ) +" unsupported features...");
if (supportedOnly) {
return initSparseWeights (training);
} else {
return initDenseWeights (training);
}
}
private int initDenseWeights (InstanceList training)
{
int numf = training.getDataAlphabet ().size ();
int total = 0;
// handle default weights
int size = cliqueSizeFromInstance (training);
total += allocateDefaultWeights (size);
// and regular weights
SparseVector[] newWeights = new SparseVector [size];
for (int i = 0; i < size; i++) {
newWeights [i] = new SparseVector (new double[numf], false);
if (weights != null)
newWeights [i].plusEqualsSparse (weights [i]);
total += numf;
logger.info ("ACRF template "+this+" weights ["+i+"] num features "+numf);
}
logger.info ("ACRF template "+this+" total num weights = "+total);
weights = newWeights;
return total;
}
private int initSparseWeights (InstanceList training)
{
// checkCliqueSizeConsistent (training); //debug
int total = 0;
// Build this bitsets that tell us what weights occur in the data
int size = cliqueSizeFromInstance (training);
BitSet[] weightsPresent = new BitSet [size];
for (int i = 0; i < size; i++) {
weightsPresent [i] = new BitSet ();
}
assignmentsPresent = new BitSet (size);
collectWeightsPresent (training, weightsPresent);
if (weights != null) {
addInCurrentWeights (weightsPresent);
}
// We can allocate default Weights now
total += allocateDefaultWeights (size);
// Use those to allocate the SparseVectors
SparseVector[] newWeights = new SparseVector [size];
total += allocateNewWeights (weightsPresent, newWeights);
logger.info ("ACRF template "+this+" total num weights = "+total);
this.weights = newWeights;
return total;
}
private int allocateNewWeights (BitSet[] weightsPresent, SparseVector[] newWeights)
{
int total = 0;
for (int i = 0; i < weightsPresent.length; i++) {
// Create a sparse vector, with the allowable indices
// specified in advance.
int numLocations = weightsPresent [i].cardinality ();
int indices[] = new int [numLocations];
for (int j = 0; j < numLocations; j++) {
indices[j] = weightsPresent [i].nextSetBit (j == 0 ? 0 : indices[j-1]+1);
// System.out.println ("ACRF "+this+" ["+i+"] has index "+indices[j]);
}
newWeights [i] = new HashedSparseVector (indices, new double[numLocations],
numLocations, numLocations, false, false, false);
if (weights != null)
newWeights [i].plusEqualsSparse (weights [i]);
total += numLocations;
if (numLocations != 0)
logger.info ("ACRF template "+this+" weights ["+i+"] num features "+numLocations);
}
return total;
}
// assumes weights already initialized
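		// Adds weights for (assignment, feature) pairs whose normalized factor value
		// under the current model exceeds SOME_UNSUPPORTED_THRESHOLD: a middle ground
		// between supported-only and fully dense weights.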
public int addSomeUnsupportedWeights (InstanceList training)
{
// add debugging marker
unsupportedWeightsAdded = true;
int size = weights.length;
BitSet[] weightsPresent = new BitSet [size];
for (int i = 0; i < size; i++) {
weightsPresent [i] = new BitSet ();
}
collectSomeUnsupportedWeights (training, weightsPresent);
addInCurrentWeights (weightsPresent);
SparseVector[] newWeights = new SparseVector [size];
int numAdded = allocateNewWeights (weightsPresent, newWeights);
logger.info (this+" some supported weights added = "+numAdded);
weights = newWeights;
return numAdded;
}
private void collectSomeUnsupportedWeights (InstanceList training, BitSet[] weightsPresent)
{
for (int ii = 0; ii < training.size(); ii++) {
Instance inst = training.get (ii);
UnrolledGraph unrolled = new UnrolledGraph (inst, new Template[] { this }, new ArrayList (), true);
for (Iterator it = unrolled.unrolledVarSetIterator (); it.hasNext();) {
UnrolledVarSet vs = (UnrolledVarSet) it.next ();
Factor f = vs.getFactor ();
Factor nrmed = f.normalize ();
for (AssignmentIterator assnIt = nrmed.assignmentIterator (); assnIt.hasNext ();) {
if (nrmed.value (assnIt) > SOME_UNSUPPORTED_THRESHOLD) {
addPresentFeatures (weightsPresent [assnIt.indexOfCurrentAssn ()], vs.fv);
}
assnIt.advance ();
}
}
}
}
private int allocateDefaultWeights (int size)
{
SparseVector newdefaultWeights = new SparseVector (new double [size], false);
if (defaultWeights != null) newdefaultWeights.plusEqualsSparse (defaultWeights);
defaultWeights = newdefaultWeights;
return size;
}
private int cliqueSizeFromInstance (InstanceList training)
{
int maxWeight = 0;
for (int i = 0; i < training.size(); i++) {
Instance instance = training.get (i);
UnrolledGraph unrolled = new UnrolledGraph (instance, new Template[] { this }, null, false);
for (Iterator it = unrolled.unrolledVarSetIterator (); it.hasNext();) {
UnrolledVarSet clique = (UnrolledVarSet) it.next ();
if (clique.tmpl == this) {
int thisWeight = clique.weight ();
if (thisWeight > maxWeight) {
maxWeight = thisWeight;
}
}
}
}
if (maxWeight == 0)
logger.warning ("***ACRF: Don't know size of "+this+". Never needed in training data.");
return maxWeight;
}
// debugging function
private void checkCliqueSizeConsistent (InstanceList training)
{
int weight = -1;
for (int i = 0; i < training.size(); i++) {
Instance instance = training.get (i);
UnrolledGraph unrolled = new UnrolledGraph (instance, new Template[] { this }, null, false);
for (Iterator it = unrolled.unrolledVarSetIterator (); it.hasNext();) {
UnrolledVarSet clique = (UnrolledVarSet) it.next ();
if (clique.tmpl == this) {
if (weight != clique.weight ()) {
System.err.println ("Weight change for clique "+clique+" template "+this+" old = "+weight+" new "+clique.weight ());
for (int vi = 0; vi < clique.size(); vi++) {
Variable var = clique.get(vi);
System.err.println (var+"\t"+var.getNumOutcomes());
}
if (weight == -1) {
weight = clique.weight ();
} else {
throw new IllegalStateException ("Error on instance "+instance+": Template "+this+" clique "+clique+" error. Strange weight: was "+weight+" now is "+clique.weight());
}
}
}
}
}
}
private void addInCurrentWeights (BitSet[] weightsPresent)
{
for (int assn = 0; assn < weights.length; assn++) {
for (int j = 0; j < weights[assn].numLocations(); j++) {
weightsPresent[assn].set (weights[assn].indexAtLocation (j));
}
}
}
private void collectWeightsPresent (InstanceList ilist, BitSet[] weightsPresent)
{
for (int inum = 0; inum < ilist.size(); inum++) {
Instance inst = ilist.get (inum);
UnrolledGraph unrolled = new UnrolledGraph (inst, new Template[] { this }, null, false);
collectTransitionsPresentForGraph (unrolled);
collectWeightsPresentForGraph (unrolled, weightsPresent);
}
}
private void collectTransitionsPresentForGraph (UnrolledGraph unrolled)
{
for (Iterator it = unrolled.unrolledVarSetIterator (); it.hasNext();) {
UnrolledVarSet clique = (UnrolledVarSet) it.next ();
if (clique.tmpl == this) {
int assnNo = clique.lookupAssignmentNumber ();
assignmentsPresent.set (assnNo);
}
}
}
private void collectWeightsPresentForGraph (UnrolledGraph unrolled, BitSet[] weightsPresent)
{
for (Iterator it = unrolled.unrolledVarSetIterator (); it.hasNext();) {
UnrolledVarSet clique = (UnrolledVarSet) it.next ();
if (clique.tmpl == this) {
int assn = clique.lookupAssignmentNumber ();
addPresentFeatures (weightsPresent[assn], clique.fv);
}
}
}
private void addPresentFeatures (BitSet wp, FeatureVector fv)
{
for (int i = 0; i < fv.numLocations (); i++) {
int index = fv.indexAtLocation (i);
wp.set (index);
}
}
public AbstractTableFactor computeFactor (UnrolledVarSet clique)
{
Matrix phi = createFactorMatrix(clique);
SparseVector[] weights = getWeights();
// System.out.println("UnrolledClique "+clique);
// System.out.println("FV : "+clique.fv);
for (int loc = 0; loc < phi.numLocations(); loc++) {
int idx = phi.indexAtLocation(loc);
assert idx < weights.length :
"Error: Instantiating "+this+" on "+clique+" : Clique has too many "
+"assignments.\n # of weights = "+weights.length+" clique weight = "+clique.weight();
SparseVector w = weights[idx];
// System.out.println("Weights "+idx+" : "+w);
// w.print();
double dp = w.dotProduct(clique.fv);
dp += getDefaultWeight(idx);
phi.setValueAtLocation(loc, dp);
}
AbstractTableFactor ptl = new LogTableFactor(clique);
ptl.setValues(phi);
return ptl;
}
/**
* Creates an empty matrix for use in storing factor values when this template is unrolled.
* By overriding this method, subclasses may enforce that factors generated be sparse.
* @param clique
* @return An empty Matrixn
*/
protected Matrix createFactorMatrix (UnrolledVarSet clique)
{
int[] szs = clique.varDimensions ();
return new Matrixn (szs);
}
public int index;
private SparseVector defaultWeights;
public double getDefaultWeight (int i) { return defaultWeights.value (i); }
public SparseVector getDefaultWeights () { return defaultWeights; }
public void setDefaultWeights (SparseVector w) { defaultWeights = w; }
public void setDefaultWeight (int i, double w) { defaultWeights.setValue (i, w); }
private boolean trainable = true;
public boolean isTrainable () { return trainable; }
public void setTrainable (boolean tr) { trainable = tr; }
// I hate serialization
private static final long serialVersionUID = -727618747254644076L; //8830720632081401678L;
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
if (assignmentsPresent == null) {
assignmentsPresent = new BitSet (weights.length);
assignmentsPresent.flip (0, assignmentsPresent.size ());
}
}
protected Assignment computeAssignment (Assignment graphAssn, VarSet vs)
{
return (Assignment) graphAssn.marginalize (vs);
}
}
	/** Abstract class for Templates that expect a (FeatureVectorSequence, LabelsAssignment) for their instances. */
public abstract static class SequenceTemplate extends Template
{
/**
		 * Adds all instantiated cliques for an instance. This method is
* called as a graph is being unrolled for an instance.
*
* @param graph The graph that the cliques will be added to.
* @param fvs The input features of the instance to unroll the
* cliques for.
* @param lblseq The label sequence of the instance being unrolled.
*/
protected abstract void addInstantiatedCliques (UnrolledGraph graph, FeatureVectorSequence fvs, LabelsAssignment lblseq);
public void addInstantiatedCliques (UnrolledGraph graph, Instance instance)
{
FeatureVectorSequence fvs = (FeatureVectorSequence) instance.getData ();
LabelsAssignment lblseq = (LabelsAssignment) instance.getTarget ();
addInstantiatedCliques (graph, fvs, lblseq);
}
}
// Abstract class for potentials that have no weights, but that know
	// how to construct a potential
public abstract static class FixedFactorTemplate extends Template {
public int initWeights (InstanceList training) { return 0; }
public SparseVector[] getWeights () { return new SparseVector [0]; }
public SparseVector getDefaultWeights () { return new SparseVector (); }
public boolean isTrainable () { return false; }
public void setTrainable (boolean tr)
{
if (tr)
throw new IllegalArgumentException ("This template is never trainable.");
}
public abstract AbstractTableFactor computeFactor (UnrolledVarSet clique);
}
/**
* A clique in the unrolled graphical model (an instantiation of
* some Template). Contains a pointer to its corresponding
* template and a FeatureVector.
*/
public static class UnrolledVarSet extends HashVarSet
{
Template tmpl; // Template that generated this clique
FeatureVector fv; // Features for the clique
Variable[] vars;
Factor factor; // Factor compute for this clique
UnrolledGraph graph;
double lastChange; // If cacheGraphs, returns this change in this varSet's factor since last grad call
public int[] varDimensions ()
{
int[] dims = new int[size()];
for (int i = 0; i < size(); i++) {
dims [i] = get(i).getNumOutcomes();
}
return dims;
}
public UnrolledVarSet (UnrolledGraph graph, Template tmpl, Variable[] vars, FeatureVector fv)
{
super (vars);
this.graph = graph;
this.vars = vars;
this.tmpl = tmpl;
this.fv = fv;
}
Assignment getAssignmentByNumber (int assn)
{
int[] sizes = varDimensions();
int[] indices = new int [sizes.length];
Matrixn.singleToIndices (assn, indices, sizes);
return new Assignment (vars, indices);
}
final public int lookupAssignmentNumber ()
{
Assignment mine = lookupAssignment ();
return mine.singleIndex ();
}
final public Assignment lookupAssignment ()
{
return tmpl.computeAssignment (graph.getAssignment (), this);
}
public int lookupNumberOfAssignment (Assignment assn)
{
int[] sizes = varDimensions();
int[] indices = new int [sizes.length];
for (int i = 0; i < indices.length; i++) {
indices[i] = assn.get (vars[i]);
}
return Matrixn.singleIndex (sizes, indices);
}
public Template getTemplate ()
{
return tmpl;
}
public FeatureVector getFv () { return fv; }
public Factor getFactor ()
{
return factor;
}
private void setFactor (Factor newF)
{
if (factor != null) {
lastChange = Factors.distLinf ((AbstractTableFactor) newF, (AbstractTableFactor) factor);
}
this.factor = newF;
}
public double getLastChange ()
{
return lastChange;
}
}
public static class UnrolledGraph extends UndirectedModel
//TODO: public static class UnrolledGraph extends FactorGraph
// implements Compactible
{
/** Array of Variables containing all nodes in model. */
List allVars = new ArrayList ();
/** Array containing all instantiated cliques (UnrolledClique) in the model. */
List cliques = new ArrayList ();
/** The number of Label objects in each Labels object */
int numSlices;
boolean isCached = false;
Instance instance;
FeatureVectorSequence fvs;
private Assignment assignment; // output
LabelAlphabet[] outputAlphabets;
ACRF acrf;
List allTemplates;
private boolean isFactorsAdded = false;
private THashMap uvsMap = new THashMap ();
public UnrolledGraph (Instance inst, Template[] templates, Template[] fixed) {
this (inst, templates, java.util.Arrays.asList (fixed));
}
UnrolledGraph (Instance inst, Template[] templates, List fixed) { this (inst, templates, fixed, true); }
/**
* Creates a graphical model for a given instance.
* This is called unrolling a dynamic model.
*/
public UnrolledGraph (Instance inst, Template[] templates, List fixed, boolean setupPotentials)
{
super (initialCapacity (inst));
instance = inst;
fvs = (FeatureVectorSequence) inst.getData ();
assignment = (Assignment) inst.getTarget ();
allTemplates = new ArrayList ();
if (fixed != null) {
allTemplates.addAll (fixed);
}
allTemplates.addAll (java.util.Arrays.asList (templates));
setupGraph ();
if (setupPotentials) {
computeCPFs ();
}
}
		// Guesses an initial capacity for the undirected model's internal storage.
private static int initialCapacity (Instance inst)
{
if (inst.getData () == null) { return 8; }
FeatureVectorSequence fvs = (FeatureVectorSequence) inst.getData ();
int T = fvs.size ();
return 3 * T;
}
private void setupGraph ()
{
for (Iterator it = allTemplates.iterator (); it.hasNext ();) {
Template tmpl = (Template) it.next ();
tmpl.addInstantiatedCliques (this, instance);
}
} // setupGraph
public void addClique (UnrolledVarSet clique)
{
cliques.add (clique);
}
private void computeCPFs ()
{
isFactorsAdded = true;
TDoubleArrayList residTmp = new TDoubleArrayList ();
for (Iterator it = cliques.iterator(); it.hasNext();) {
UnrolledVarSet clique = (UnrolledVarSet) it.next();
AbstractTableFactor ptl = clique.tmpl.computeFactor (clique);
addFactorInternal (clique, ptl);
clique.tmpl.modifyPotential (this, clique, ptl);
uvsMap.put (ptl, clique);
// sigh
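				// Record this factor's L-infinity distance from a freshly-created factor over
				// the same variables; these values seed the residuals returned by getLastResids().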
LogTableFactor unif = new LogTableFactor (clique);
residTmp.add (Factors.distLinf (unif, ptl));
}
lastResids = residTmp.toNativeArray ();
}
/** Adds FACTOR to this graph, but while maintaining the invariant that every set of variables has
* at most one factor over exactly that domain. If the given FACTOR has a domain that is already
* used by some other factor PREV, then PREV is replaced with a FactorGraph containing PREV and FACTOR.
* @param clique
* @param factor The factor to add
*/
private void addFactorInternal (UnrolledVarSet clique, Factor factor)
{
clique.setFactor (factor);
Factor prevFactor = factorOf (factor.varSet ());
if (prevFactor == null) {
addFactor (factor);
} else if (prevFactor instanceof FactorGraph) {
prevFactor.multiplyBy (factor);
} else {
divideBy (prevFactor);
addFactor (new FactorGraph (new Factor[] { factor, prevFactor }));
}
}
private double[] lastResids;
private void recomputeFactors ()
{
lastResids = new double [factors ().size ()];
for (Iterator it = cliques.iterator(); it.hasNext();) {
UnrolledVarSet clique = (UnrolledVarSet) it.next();
AbstractTableFactor oldF = (AbstractTableFactor) clique.getFactor ();
AbstractTableFactor newF = clique.tmpl.computeFactor (clique);
double dist = Factors.distLinf ((AbstractTableFactor) oldF.duplicate ().normalize (), (AbstractTableFactor) newF.duplicate ().normalize ());
lastResids [getIndex (oldF)] = dist;
oldF.setValues (newF.getLogValueMatrix ());
clique.tmpl.modifyPotential (this, clique, oldF);
}
}
public double[] getLastResids ()
{
return lastResids;
}
// Accessors
int getMaxTime () { return fvs.size(); }
int getNumFactors () { return outputAlphabets.length; }
/**
* Returns an Assignment object that corresponds to the
* LabelSequence for which this graph was unrolled.
*/
public Assignment getAssignment ()
{
return assignment;
}
// xxx These should be refactor to UndirectedModel, and automatically add EvidencePotentials
TObjectIntHashMap observedVars = new TObjectIntHashMap ();
private boolean isObserved (Variable var)
{
return observedVars.contains (var);
}
public void setObserved (Variable var, int outcome)
{
observedVars.put (var, outcome);
}
public int observedValue (Variable var)
{
return observedVars.get (var);
}
public Iterator unrolledVarSetIterator ()
{
return cliques.iterator ();
}
public UnrolledVarSet getUnrolledVarSet (int cnum)
{
return (UnrolledVarSet) cliques.get (cnum);
}
public int getIndex (VarSet vs)
{
return cliques.indexOf (vs);
}
public Variable get (int idx)
{
if (isFactorsAdded) {
return super.get (idx);
} else {
return (Variable) allVars.get (idx);
}
}
public int getIndex (Variable var)
{
if (isFactorsAdded) {
return super.getIndex (var);
} else {
return allVars.indexOf (var);
}
}
public double getLogNumAssignments ()
{
double total = 0;
for (int i = 0; i < numVariables (); i++) {
Variable var = get(i);
total += Math.log (var.getNumOutcomes ());
}
return total;
}
// convenience method
public Variable varOfIndex (int t, int j)
{
LabelsAssignment lblseq = (LabelsAssignment) instance.getTarget ();
return lblseq.varOfIndex (t, j);
}
public int numSlices ()
{
LabelsAssignment lblseq = (LabelsAssignment) instance.getTarget ();
return lblseq.numSlices ();
}
// computes the residual of each factor, without actually changing this unrolled graph
public double[] computeCurrentResids ()
{
lastResids = new double [factors ().size ()];
for (Iterator it = cliques.iterator(); it.hasNext();) {
UnrolledVarSet clique = (UnrolledVarSet) it.next();
AbstractTableFactor oldF = (AbstractTableFactor) clique.getFactor ();
AbstractTableFactor newF = clique.tmpl.computeFactor (clique);
double dist = Factors.distLinf (oldF, newF);
lastResids [getIndex (oldF)] = dist;
}
return lastResids;
}
public UnrolledVarSet getUnrolledVarSet (Factor f)
{
return (UnrolledVarSet) uvsMap.get (f);
}
}
public Optimizable.ByGradientValue getMaximizable (InstanceList ilst)
{
return new MaximizableACRF (ilst);
}
public List bestAssignment (InstanceList lst)
{
List ret = new ArrayList (lst.size());
for (int i = 0; i < lst.size(); i++) {
ret.add (bestAssignment (lst.get (i)));
}
return ret;
}
public Assignment bestAssignment (Instance inst)
{
// Compute the MAP assignment
UnrolledGraph unrolled = unroll (inst);
return Models.bestAssignment (unrolled, viterbi);
}
public List getBestLabels (InstanceList lst)
{
List ret = new ArrayList (lst.size());
for (int i = 0; i < lst.size(); i++) {
ret.add (getBestLabels (lst.get (i)));
}
return ret;
}
public LabelsSequence getBestLabels (Instance inst)
{
Assignment assn = bestAssignment (inst);
LabelsAssignment gold = (LabelsAssignment) inst.getTarget ();
return gold.toLabelsSequence (assn);
}
public UnrolledGraph unroll (Instance inst)
{
UnrolledGraph g;
if (cacheUnrolledGraphs && graphCache.containsKey (inst)) {
g = (UnrolledGraph) graphCache.get (inst);
g.recomputeFactors ();
} else {
g = new UnrolledGraph (inst, templates, fixedPtls);
if (graphProcessor != null)
graphProcessor.process (g, inst);
}
if (cacheUnrolledGraphs) graphCache.put (inst, g);
return g;
}
public UnrolledGraph unrollStructureOnly (Instance inst)
{
UnrolledGraph g;
if (cacheUnrolledGraphs && graphCache.containsKey (inst)) {
g = (UnrolledGraph) graphCache.get (inst);
g.recomputeFactors ();
} else {
g = new UnrolledGraph (inst, templates, fixedPtls, false);
if (graphProcessor != null)
graphProcessor.process (g, inst);
}
if (cacheUnrolledGraphs) graphCache.put (inst, g);
return g;
}
private void reportOnGraphCache ()
{
logger.info ("Number of cached graphs = "+graphCache.size ());
}
public class MaximizableACRF implements Optimizable.ByGradientValue, Serializable {
InstanceList trainData;
double cachedValue = -123456789;
double[] cachedGradient;
protected BitSet infiniteValues = null;
boolean cachedValueStale, cachedGradientStale;
private int numParameters;
private int totalNodes = 0;
private static final boolean printGradient = false;
/** An unrolled version of the ACRF. */
transient private UnrolledGraph graph;
protected Inferencer inferencer = globalInferencer.duplicate();
/* Vectors that contain the counts of features observed in the
training data. Maps
(clique-template x feature-number) => count
*/
SparseVector constraints[][];
/* Vectors that contain the expected value over the
		 * labels of all the features, having seen the training data
* (but not the training labels).
*/
SparseVector expectations[][];
SparseVector defaultConstraints[];
SparseVector defaultExpectations[];
private void initWeights (InstanceList training)
{
for (int tidx = 0; tidx < templates.length; tidx++) {
numParameters += templates[tidx].initWeights (training);
}
}
/* Initialize constraints[][] and expectations[][]
* to have the same dimensions as weights, but to
* be all zero.
*/
private void initConstraintsExpectations ()
{
// Do the defaults first
defaultConstraints = new SparseVector [templates.length];
defaultExpectations = new SparseVector [templates.length];
for (int tidx = 0; tidx < templates.length; tidx++) {
SparseVector defaults = templates[tidx].getDefaultWeights();
defaultConstraints[tidx] = (SparseVector) defaults.cloneMatrixZeroed ();
defaultExpectations[tidx] = (SparseVector) defaults.cloneMatrixZeroed ();
}
// And now the others
constraints = new SparseVector [templates.length][];
expectations = new SparseVector [templates.length][];
for (int tidx = 0; tidx < templates.length; tidx++) {
Template tmpl = templates [tidx];
SparseVector[] weights = tmpl.getWeights();
constraints [tidx] = new SparseVector [weights.length];
expectations [tidx] = new SparseVector [weights.length];
for (int i = 0; i < weights.length; i++) {
constraints[tidx][i] = (SparseVector) weights[i].cloneMatrixZeroed ();
expectations[tidx][i] = (SparseVector) weights[i].cloneMatrixZeroed ();
}
}
}
/**
* Set all expectations to 0 after they've been
* initialized.
*/
void resetExpectations ()
{
for (int tidx = 0; tidx < expectations.length; tidx++) {
defaultExpectations [tidx].setAll (0.0);
for (int i = 0; i < expectations[tidx].length; i++) {
expectations[tidx][i].setAll (0.0);
}
}
}
protected MaximizableACRF (InstanceList ilist)
{
logger.finest ("Initializing MaximizableACRF.");
/* allocate for weights, constraints and expectations */
this.trainData = ilist;
initWeights(trainData);
initConstraintsExpectations();
int numInstances = trainData.size();
cachedGradient = new double[numParameters];
cachedValueStale = cachedGradientStale = true;
/*
if (cacheUnrolledGraphs) {
unrolledGraphs = new UnrolledGraph [numInstances];
}
*/
logger.info("Number of training instances = " + numInstances );
logger.info("Number of parameters = " + numParameters );
logger.info("Default feature index = " + defaultFeatureIndex );
describePrior();
logger.fine("Computing constraints");
collectConstraints (trainData);
}
private void describePrior ()
{
logger.info ("Using gaussian prior with variance "+gaussianPriorVariance);
}
/* not tested
protected MaximizableDCRF (MaximizableACRF maxable, InstanceList ilist)
{
logger.finest ("Initializing MaximizableACRF.");
this.trainData = ilist;
initConstraintsExpectations();
constraints = maxable.constraints; // These can be shared
int numInstances = trainData.size();
// These must occur after initWeights()
this.numParameters = numWeights;
cachedGradient = new double[numParameters];
cachedValueStale = cachedGradientStale = true;
if (cacheUnrolledGraphs) {
unrolledGraphs = new UnrolledGraph [numInstances];
}
}
*/
public int getNumParameters() { return numParameters; }
/* Negate initialValue and finalValue because the parameters are in
* terms of "weights", not "values".
*/
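	/* Parameter vector layout: first each template's default (bias) weights, in
	 * template order, followed by each template's per-assignment feature weights,
	 * again in template order. getParameters() and setParameters() must agree on
	 * this ordering. */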
public void getParameters (double[] buf) {
if ( buf.length != numParameters )
throw new IllegalArgumentException("Argument is not of the " +
" correct dimensions");
int idx = 0;
for (int tidx = 0; tidx < templates.length; tidx++) {
Template tmpl = templates [tidx];
SparseVector defaults = tmpl.getDefaultWeights ();
double[] values = defaults.getValues();
System.arraycopy (values, 0, buf, idx, values.length);
idx += values.length;
}
for (int tidx = 0; tidx < templates.length; tidx++) {
Template tmpl = templates [tidx];
SparseVector[] weights = tmpl.getWeights();
for (int assn = 0; assn < weights.length; assn++) {
double[] values = weights [assn].getValues ();
System.arraycopy (values, 0, buf, idx, values.length);
idx += values.length;
}
}
}
public void setParameters (double[] params)
{
if ( params.length != numParameters )
throw new IllegalArgumentException("Argument is not of the " +
" correct dimensions");
cachedValueStale = cachedGradientStale = true;
int idx = 0;
for (int tidx = 0; tidx < templates.length; tidx++) {
Template tmpl = templates [tidx];
SparseVector defaults = tmpl.getDefaultWeights();
double[] values = defaults.getValues ();
System.arraycopy (params, idx, values, 0, values.length);
idx += values.length;
}
for (int tidx = 0; tidx < templates.length; tidx++) {
Template tmpl = templates [tidx];
SparseVector[] weights = tmpl.getWeights();
for (int assn = 0; assn < weights.length; assn++) {
double[] values = weights [assn].getValues ();
System.arraycopy (params, idx, values, 0, values.length);
idx += values.length;
}
}
}
// Functions for unit tests to get constraints and expectations
// I'm too lazy to make a deep copy. Callers should not
// modify these.
public SparseVector[] getExpectations (int cnum) { return expectations [cnum]; }
public SparseVector[] getConstraints (int cnum) { return constraints [cnum]; }
/** print weights */
private void printParameters()
{
double[] buf = new double[numParameters];
getParameters(buf);
int len = buf.length;
for (int w = 0; w < len; w++)
System.out.print(buf[w] + "\t");
System.out.println();
}
public double getParameter (int index) { return(0.0); }
public void setParameter (int index, double value) {}
/** Returns the log probability of the training sequence labels */
public double getValue ()
{
if (cachedValueStale)
{
cachedValue = computeLogLikelihood ();
cachedValueStale = false;
cachedGradientStale = true;
/*
if(saveNum++ % savePeriod == 0) {
System.out.println ("saving ACRF ...");
ACRF.this.writeWeights(weightFile);
System.out.println ("Done ....");
}
*/
logger.info ("getValue() (loglikelihood) = " + cachedValue);
}
if(Double.isNaN(cachedValue))
{
logger.warning("value is NaN");
cachedValue = 0;
}
return cachedValue;
}
protected double computeLogLikelihood () {
double retval = 0.0;
int numInstances = trainData.size();
long start = System.currentTimeMillis();
long unrollTime = 0;
long marginalsTime = 0;
/* Instance values must either always or never be included in
* the total values; we can't just sometimes skip a value
* because it is infinite, that throws off the total values.
* We only allow an instance to have infinite value if it happens
* from the start (we don't compute the value for the instance
* after the first round. If any other instance has infinite
* value after that it is an error. */
boolean initializingInfiniteValues = false;
if (infiniteValues == null) {
/* We could initialize bitset with one slot for every
* instance, but it is *probably* cheaper not to, taking the
* time hit to allocate the space if a bit becomes
* necessary. */
infiniteValues = new BitSet ();
initializingInfiniteValues = true;
}
/* Clear the sufficient statistics that we are about to fill */
resetExpectations();
/* Fill in expectations for each instance */
for (int i = 0; i < numInstances; i++)
{
Instance instance = trainData.get(i);
/* Compute marginals for each clique */
long unrollStart = System.currentTimeMillis ();
UnrolledGraph unrolled = unroll (instance);
long unrollEnd = System.currentTimeMillis ();
unrollTime += (unrollEnd - unrollStart);
if (unrolled.numVariables () == 0) continue; // Happens if all nodes are pruned.
inferencer.computeMarginals (unrolled);
marginalsTime += (System.currentTimeMillis () - unrollEnd);
// unrolled.dump();
/* Save the expected value of each feature for when we
compute the gradient. */
collectExpectations (unrolled, inferencer);
/* Add in the joint prob of the labeling. */
Assignment jointAssn = unrolled.getAssignment ();
double value = inferencer.lookupLogJoint (jointAssn);
if (Double.isInfinite(value))
{
if (initializingInfiniteValues) {
logger.warning ("Instance " + instance.getName() +
" has infinite value; skipping.");
infiniteValues.set (i);
continue;
} else if (!infiniteValues.get(i)) {
logger.warning ("Infinite value on instance "+instance.getName()+
"returning -infinity");
return Double.NEGATIVE_INFINITY;
/*
printDebugInfo (unrolled);
throw new IllegalStateException
("Instance " + instance.getName()+ " used to have non-infinite"
+ " value, but now it has infinite value.");
*/
}
} else if (Double.isNaN (value)) {
System.out.println("NaN on instance "+i+" : "+instance.getName ());
printDebugInfo (unrolled);
/* throw new IllegalStateException
("Value is NaN in ACRF.getValue() Instance "+i);
*/
logger.warning ("Value is NaN in ACRF.getValue() Instance "+i+" : "+
"returning -infinity... ");
return Double.NEGATIVE_INFINITY;
} else {
retval += value;
}
}
if (doSizeScale) {
retval = retval / trainData.size ();
}
/* Incorporate Gaussian prior on parameters. This means
			 that for each weight, we will subtract w^2 / (2 * variance) from the
log probability. */
double priorDenom = 2 * gaussianPriorVariance;
for (int tidx = 0; tidx < templates.length; tidx++) {
SparseVector[] weights = templates [tidx].getWeights ();
for (int j = 0; j < weights.length; j++) {
for (int fnum = 0; fnum < weights[j].numLocations(); fnum++) {
double w = weights [j].valueAtLocation (fnum);
if (weightValid (w, tidx, j)) {
retval += -w*w/priorDenom;
}
}
}
}
if (cacheUnrolledGraphs) reportOnGraphCache ();
long end = System.currentTimeMillis ();
logger.info ("ACRF Inference time (ms) = "+(end-start));
logger.info ("ACRF marginals time (ms) = "+marginalsTime);
logger.info ("ACRF unroll time (ms) = "+unrollTime);
logger.info ("getValue (loglikelihood) = "+retval);
return retval;
}
/**
		 * Computes the gradient of the penalized log likelihood
* of the ACRF, and returns it in buf[].
*/
public void getValueGradient(double[] buf)
{
if (cachedGradientStale)
{
/* This will fill in the expectations */
if (cachedValueStale) getValue ();
computeGradient ();
cachedGradientStale = false;
}
if (buf.length != numParameters)
throw new IllegalArgumentException
("Incorrect length buffer to getValueGradient(). Expected "
+ numParameters + ", received " + buf.length);
System.arraycopy (cachedGradient, 0, buf, 0, cachedGradient.length);
}
/**
* Computes the gradient of the penalized log likelihood of the
* ACRF, and places it in cachedGradient[].
*
* Gradient is
* constraint - expectation - parameters/gaussianPriorVariance
*/
private void computeGradient ()
{
/* Index into current element of cachedGradient[] array. */
int gidx = 0;
// First do gradient wrt defaultWeights
for (int tidx = 0; tidx < templates.length; tidx++) {
SparseVector theseWeights = templates[tidx].getDefaultWeights ();
SparseVector theseConstraints = defaultConstraints [tidx];
SparseVector theseExpectations = defaultExpectations [tidx];
for (int j = 0; j < theseWeights.numLocations(); j++) {
double weight = theseWeights.valueAtLocation (j);
double constraint = theseConstraints.valueAtLocation (j);
double expectation = theseExpectations.valueAtLocation (j);
if (printGradient)
System.out.println (" gradient [" + gidx + "] = DEFAULT("+templates[tidx]+"["+j+"]) = "
+ constraint + " (ctr) - " + expectation + " (exp) - " +
(weight / gaussianPriorVariance) + " (reg) ");
double scale = doSizeScale ? (1.0 / trainData.size()) : 1.0;
cachedGradient [gidx++] = scale * (constraint - expectation) - (weight / gaussianPriorVariance);
}
}
// Now do other weights
for (int tidx = 0; tidx < templates.length; tidx++) {
Template tmpl = templates [tidx];
SparseVector[] weights = tmpl.getWeights ();
for (int i = 0; i < weights.length; i++) {
SparseVector thisWeightVec = weights [i];
SparseVector thisConstraintVec = constraints [tidx][i];
SparseVector thisExpectationVec = expectations [tidx][i];
for (int j = 0; j < thisWeightVec.numLocations(); j++) {
double w = thisWeightVec.valueAtLocation (j);
double gradient; // Computed below
/* A parameter may be set to -infinity by an external user.
* We set gradient to 0 because the parameter's value can
* never change anyway and it will mess up future calculations
* on the matrix. */
if (Double.isInfinite(w)) {
logger.warning("Infinite weight for node index " +i+
" feature " +
inputAlphabet.lookupObject(j) );
gradient = 0.0;
} else {
double constraint = thisConstraintVec.valueAtLocation (j);
double expectation = thisExpectationVec.valueAtLocation (j);
double scale = doSizeScale ? (1.0 / trainData.size()) : 1.0;
gradient = scale * (constraint - expectation) - (w / gaussianPriorVariance);
if (printGradient) {
									String featureName = (String) inputAlphabet.lookupObject (thisWeightVec.indexAtLocation (j));
System.out.println (" gradient [" + gidx + "] = WEIGHT("+templates[tidx]+"["+i+"]) ["+featureName+"] = "
+ constraint + " (ctr) - " + expectation + " (exp) - " +
(w / gaussianPriorVariance) + " (reg) ");
}
}
cachedGradient[gidx++] = gradient;
}
}
}
// reportGradient ();
}
// Only useful for debugging
private int gradCallNo = 0;
private void reportGradient ()
{
if (verboseOutputDirectory != null) {
gradCallNo++;
try {
File thisFile = new File (verboseOutputDirectory, "acrf-grad-"+gradCallNo+".txt");
PrintWriter writer = new PrintWriter (new FileWriter (thisFile));
writer.println (ArrayUtils.toString (cachedGradient));
writer.close ();
thisFile = new File (verboseOutputDirectory, "acrf-value-"+gradCallNo+".txt");
writer = new PrintWriter (new FileWriter (thisFile));
writer.println (cachedValue);
writer.close ();
double[] buf = new double [getNumParameters()];
getParameters (buf);
thisFile = new File (verboseOutputDirectory, "acrf-weight-"+gradCallNo+".txt");
writer = new PrintWriter (new FileWriter (thisFile));
writer.println (ArrayUtils.toString (buf));
writer.close ();
thisFile = new File (verboseOutputDirectory, "acrf-constraint-"+gradCallNo+".txt");
printVecs (thisFile, defaultConstraints, constraints);
thisFile = new File (verboseOutputDirectory, "acrf-exp-"+gradCallNo+".txt");
printVecs (thisFile, defaultExpectations, expectations);
thisFile = new File (verboseOutputDirectory, "acrf-dumps-"+gradCallNo+".txt");
writer = new PrintWriter (new FileWriter (thisFile));
for (int ii = 0; ii < trainData.size(); ii++) {
ACRF.UnrolledGraph unrolled = unroll (trainData.get (ii));
writer.println (unrolled);
}
writer.close ();
} catch (IOException e) {
throw new RuntimeException (e);
}
}
}
private void printVecs (File thisFile, SparseVector[] defaultConstraints, SparseVector[][] constraints) throws IOException
{
PrintWriter writer = new PrintWriter (new FileWriter (thisFile));
for (int ti = 0; ti < defaultConstraints.length; ti++) {
writer.println (defaultConstraints [ti]);
}
for (int ti = 0; ti < constraints.length; ti++) {
for (int i = 0; i < constraints[ti].length; i++) {
writer.println (constraints [ti][i]);
}
}
writer.close ();
}
/**
* For every feature f_k, computes the expected value of f_k
	 * over all possible label sequences given the list of instances
* we have.
*
* These values are stored in collector, that is,
* collector[i][j][k] gets the expected value for the
* feature for clique i, label assignment j, and input features k.
*/
private void collectExpectations (UnrolledGraph unrolled, Inferencer inferencer)
{
for (Iterator it = unrolled.unrolledVarSetIterator (); it.hasNext();) {
UnrolledVarSet clique = (UnrolledVarSet) it.next();
int tidx = clique.tmpl.index;
if (tidx == -1) continue;
Factor ptl = inferencer.lookupMarginal (clique);
				// for each assignment to the clique
				// Note that we get the AssignmentIterator from the factor (rather than the clique), because the
				// factor object knows about any potential sparsity.
				/* Also, note that we use assnIt.indexOfCurrentAssn(). This assumes that the ordering of variables in the
				 * VarSet returned by lookupMarginal() is consistent between all calls to the method. This is a somewhat brittle
				 * assumption, but I don't see how to relax it without being terribly inefficient. */
AssignmentIterator assnIt = ptl.assignmentIterator ();
while (assnIt.hasNext ()) {
double marginal = ptl.value (assnIt);
int idx = assnIt.indexOfCurrentAssn ();
expectations [tidx][idx].plusEqualsSparse (clique.fv, marginal);
if (defaultExpectations[tidx].location (idx) != -1)
defaultExpectations [tidx].incrementValue (idx, marginal);
assnIt.advance ();
}
}
}
public void collectConstraints (InstanceList ilist)
{
for (int inum = 0; inum < ilist.size(); inum++) {
logger.finest ("*** Collecting constraints for instance "+inum);
Instance inst = ilist.get (inum);
UnrolledGraph unrolled = new UnrolledGraph (inst, templates, null, false);
			totalNodes += unrolled.numVariables ();
for (Iterator it = unrolled.unrolledVarSetIterator (); it.hasNext();) {
UnrolledVarSet clique = (UnrolledVarSet) it.next();
int tidx = clique.tmpl.index;
if (tidx == -1) continue;
int assn = clique.lookupAssignmentNumber ();
constraints [tidx][assn].plusEqualsSparse (clique.fv);
if (defaultConstraints[tidx].location (assn) != -1)
defaultConstraints [tidx].incrementValue (assn, 1.0);
}
}
}
void dumpGradientToFile (String fileName)
{
try {
PrintStream w = new PrintStream (new FileOutputStream (fileName));
for (int i = 0; i < numParameters; i++) {
w.println (cachedGradient[i]);
}
w.close ();
} catch (IOException e) {
System.err.println("Could not open output file.");
e.printStackTrace ();
}
}
void dumpDefaults ()
{
System.out.println("Default constraints");
for (int i = 0; i < defaultConstraints.length; i++) {
System.out.println("Template "+i);
defaultConstraints[i].print ();
}
System.out.println("Default expectations");
for (int i = 0; i < defaultExpectations.length; i++) {
System.out.println("Template "+i);
defaultExpectations[i].print ();
}
}
void printDebugInfo (UnrolledGraph unrolled)
{
print (System.err);
Assignment assn = unrolled.getAssignment ();
for (Iterator it = unrolled.unrolledVarSetIterator (); it.hasNext();) {
UnrolledVarSet clique = (UnrolledVarSet) it.next();
System.out.println("Clique "+clique);
dumpAssnForClique (assn, clique);
Factor ptl = unrolled.factorOf (clique);
System.out.println("Value = "+ptl.value (assn));
System.out.println(ptl);
}
}
void dumpAssnForClique (Assignment assn, UnrolledVarSet clique)
{
for (Iterator it = clique.iterator(); it.hasNext();) {
Variable var = (Variable) it.next();
System.out.println(var+" ==> "+assn.getObject (var)
+" ("+assn.get (var)+")");
}
}
private boolean weightValid (double w, int cnum, int j)
{
if (Double.isInfinite (w)) {
logger.warning ("Weight is infinite for clique "+cnum+"assignment "+j);
return false;
} else if (Double.isNaN (w)) {
logger.warning ("Weight is Nan for clique "+cnum+"assignment "+j);
return false;
} else {
return true;
}
}
public void report ()
{
int nmsg = -1;
if (inferencer instanceof AbstractBeliefPropagation) {
nmsg = ((AbstractBeliefPropagation)inferencer).getTotalMessagesSent();
} else if (inferencer instanceof JunctionTreeInferencer) {
nmsg = ((JunctionTreeInferencer)inferencer).getTotalMessagesSent();
}
if (nmsg != -1)
logger.info ("Total messages sent = "+nmsg);
}
public void forceStale ()
{
cachedValueStale = cachedGradientStale = true;
}
public int getTotalNodes ()
{
return totalNodes;
}
} // MaximizableACRF
// printing functions
public void print (OutputStream os)
{
PrintStream out = new PrintStream (os);
out.println ("ACRF. Number of templates: == "+templates.length);
out.println ("Weights");
for (int tidx = 0; tidx < templates.length; tidx++) {
Template tmpl = templates [tidx];
out.println ("TEMPLATE "+tidx+" == "+tmpl);
out.println ("Default weights: ");
SparseVector defaults = tmpl.getDefaultWeights ();
for (int loc = 0; loc < defaults.numLocations (); loc++)
out.println (" ["+defaults.indexAtLocation (loc)+"] = "+defaults.valueAtLocation (loc));
SparseVector[] weights = tmpl.getWeights ();
for (int assn = 0; assn < weights.length; assn++) {
out.println ("Assignment "+assn);
SparseVector w = weights[assn];
for (int x = 0; x < w.numLocations(); x++) {
int idx = w.indexAtLocation (x);
if (idx == defaultFeatureIndex) {
out.print ("DEFAULT");
} else {
out.print (inputAlphabet.lookupObject (idx));
}
out.println (" "+w.valueAtLocation (x));
}
}
}
}
private static void dumpValues (String title, SparseVector[][] values)
{
try {
for (int cnum = 0; cnum < values.length; cnum++) {
System.out.println (title+" Clique: "+cnum);
writeCliqueValues (values [cnum]);
}
} catch (IOException e) {
System.err.println("Error writing to file!");
e.printStackTrace ();
}
}
private static void writeCliqueValues (SparseVector[] values)
throws IOException
{
System.out.println("Num assignments = "+values.length);
for (int assn = 0; assn < values.length; assn++) {
System.out.println("Num locations = "+values[assn].numLocations());
for (int j = 0; j < values[assn].numLocations(); j++) {
int idx = values[assn].indexAtLocation (j);
System.out.print ("sparse ["+assn+"]["+idx+"] = ");
System.out.println (values[assn].valueAtLocation (j));
}
}
}
private void dumpOneGraph (UnrolledGraph unrolled)
{
Assignment assn = unrolled.getAssignment ();
for (Iterator it = unrolled.unrolledVarSetIterator (); it.hasNext();) {
UnrolledVarSet clique = (UnrolledVarSet) it.next();
System.out.println("Clique "+clique);
// dumpAssnForClique (assn, clique);
Factor ptl = unrolled.factorOf (clique);
if (ptl != null) System.out.println (ptl);
}
}
public void dumpUnrolledGraphs (InstanceList lst)
{
for (int i = 0; i < lst.size(); i++) {
Instance inst = lst.get (i);
System.out.println("INSTANCE "+i+" : "+inst.getName ());
UnrolledGraph unrolled = unroll (inst);
dumpOneGraph (unrolled);
}
}
// Templates
/**
* A template that adds edges between adjacent nodes in a label
* sequence for one factor.
*/
public static class BigramTemplate extends ACRF.SequenceTemplate {
int factor;
public BigramTemplate (int factor)
{
this.factor = factor;
}
public void addInstantiatedCliques (ACRF.UnrolledGraph graph,
FeatureVectorSequence fvs,
LabelsAssignment lblseq)
{
for (int i = 0; i < lblseq.maxTime() - 1; i++) {
Variable v1 = lblseq.varOfIndex (i, factor);
Variable v2 = lblseq.varOfIndex (i + 1, factor);
FeatureVector fv = fvs.getFeatureVector (i);
Variable[] vars = new Variable[] { v1, v2 };
assert v1 != null : "Couldn't get label factor "+factor+" time "+i;
assert v2 != null : "Couldn't get label factor "+factor+" time "+(i+1);
ACRF.UnrolledVarSet clique = new ACRF.UnrolledVarSet (graph, this, vars, fv);
graph.addClique (clique);
}
}
public String toString ()
{
return "[BigramTemplate ("+factor+")]";
}
public int getFactor ()
{
return factor;
}
private static final long serialVersionUID = 8944142287103225874L;
}
/**
* A template that adds node potentials for a given factor.
*/
public static class UnigramTemplate extends ACRF.SequenceTemplate {
int factor;
public UnigramTemplate (int factor)
{
this.factor = factor;
}
public void addInstantiatedCliques (ACRF.UnrolledGraph graph,
FeatureVectorSequence fvs,
LabelsAssignment lblseq)
{
for (int i = 0; i < lblseq.maxTime(); i++) {
Variable v = lblseq.varOfIndex (i, factor);
FeatureVector fv = fvs.getFeatureVector (i);
Variable[] vars = new Variable[] { v };
assert v != null : "Couldn't get label factor "+factor+" time "+i;
ACRF.UnrolledVarSet clique = new ACRF.UnrolledVarSet (graph, this, vars, fv);
graph.addClique (clique);
}
}
public String toString ()
{
return "[UnigramTemplate ("+factor+")]";
}
private static final long serialVersionUID = 1L;
}
/**
* A template that adds edges between cotemporal nodes of a given pair
* of factors.
*/
public static class PairwiseFactorTemplate extends ACRF.SequenceTemplate {
int factor0;
int factor1;
public PairwiseFactorTemplate (int factor0, int factor1)
{
this.factor0 = factor0;
this.factor1 = factor1;
}
public void addInstantiatedCliques (ACRF.UnrolledGraph graph,
FeatureVectorSequence fvs,
LabelsAssignment lblseq)
{
for (int i = 0; i < lblseq.maxTime(); i++) {
Variable v1 = lblseq.varOfIndex (i, factor0);
Variable v2 = lblseq.varOfIndex (i, factor1);
FeatureVector fv = fvs.getFeatureVector (i);
Variable[] vars = new Variable[] { v1, v2 };
assert v1 != null : "Couldn't get label factor "+factor0+" time "+i;
assert v2 != null : "Couldn't get label factor "+factor1+" time "+i;
ACRF.UnrolledVarSet clique = new ACRF.UnrolledVarSet (graph, this, vars, fv);
graph.addClique (clique);
}
}
public String toString ()
{
return "[PairwiseFactorTemplate ("+factor0+", "+factor1+")]";
}
private static final long serialVersionUID = 1L;
}
public void readWeightsFromText (Reader reader) throws IOException
{
try {
Document d = new SAXBuilder ().build (reader);
Element root = d.getRootElement ();
List tmpls = root.getChildren ("TEMPLATE");
for (Iterator it = tmpls.iterator (); it.hasNext ();) {
Element tmplElt = (Element) it.next ();
String tmplName = tmplElt.getAttributeValue ("NAME");
int ti = Integer.parseInt (tmplElt.getAttributeValue ("IDX"));
ACRF.Template tmpl = templates[ti];
if (! tmpl.getClass ().getName().equals (tmplName)) {
throw new RuntimeException ("Expected template "+tmpl+"; got "+tmplName);
}
Element defWElt = tmplElt.getChild ("DEFAULT_WEIGHTS");
SparseVector defW = readSparseVector (defWElt.getText (), null);
Element wVecElt = tmplElt.getChild ("WEIGHTS");
int nw = Integer.parseInt (wVecElt.getAttributeValue ("SIZE"));
SparseVector[] w = new SparseVector[nw];
List wLst = wVecElt.getChildren ("WEIGHT");
for (Iterator it2 = wLst.iterator (); it2.hasNext ();) {
Element wElt = (Element) it2.next ();
int wi = Integer.parseInt (wElt.getAttributeValue ("IDX"));
w[wi] = readSparseVector (wElt.getText (), getInputAlphabet ());
}
tmpl.setDefaultWeights (defW);
tmpl.weights = w;
}
} catch (JDOMException e) {
throw new RuntimeException (e);
}
}
private SparseVector readSparseVector (String str, Alphabet dict) throws IOException
{
TIntArrayList idxs = new TIntArrayList ();
TDoubleArrayList vals = new TDoubleArrayList ();
String[] lines = str.split ("\n");
for (int li = 0; li < lines.length; li++) {
String line = lines[li];
if (Pattern.matches ("^\\s*$", line)) continue;
String[] fields = line.split ("\t");
int idx;
if (dict != null) {
idx = dict.lookupIndex (fields[0]);
} else {
idx = Integer.parseInt (fields[0]);
}
double val = Double.parseDouble (fields[1]);
idxs.add (idx);
vals.add (val);
}
return new SparseVector (idxs.toNativeArray (), vals.toNativeArray ());
}
public void writeWeightsText (Writer writer)
{
PrintWriter out = new PrintWriter (writer);
out.println ("<CRF>");
for (int ti = 0; ti < templates.length; ti++) {
Template tmpl = templates[ti];
out.println ("<TEMPLATE NAME=\""+tmpl.getClass ().getName ()+"\" IDX=\""+ti+"\" >");
out.println ("<DEFAULT_WEIGHTS>");
SparseVector defW = tmpl.getDefaultWeights ();
for (int loc = 0; loc < defW.numLocations (); loc++) {
out.print (defW.indexAtLocation (loc));
out.print ("\t");
out.println (defW.valueAtLocation (loc));
}
out.println ("</DEFAULT_WEIGHTS>");
out.println ();
SparseVector[] w = tmpl.getWeights ();
out.println ("<WEIGHTS SIZE=\""+w.length+"\">");
for (int wi = 0; wi < w.length; wi++) {
out.println ("<WEIGHT IDX=\""+wi+"\">");
writeWeightVector (out, w[wi]);
out.println ();
out.println ("</WEIGHT>");
}
out.println ("</WEIGHTS>");
out.println ("</TEMPLATE>");
}
out.println ("</CRF>");
}
private void writeWeightVector (PrintWriter out, SparseVector sv)
{
out.println ("<![CDATA[");
Alphabet dict = getInputAlphabet ();
for (int loc = 0; loc < sv.numLocations (); loc++) {
int idx = sv.indexAtLocation (loc);
double val = sv.valueAtLocation (loc);
if (idx < dict.size()) {
out.print (dict.lookupObject (idx));
} else {
out.print ("IDX"+idx);
}
out.print ("\t");
out.println (val);
}
out.println ("]]>");
}
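	/* Sketch of the weights-text format written by writeWeightsText() and read
	 * back by readWeightsFromText(); the template name and the tab-separated
	 * index/value and feature/value pairs below are illustrative only:
	 *
	 *   <CRF>
	 *   <TEMPLATE NAME="cc.mallet.grmm.learning.ACRF$BigramTemplate" IDX="0" >
	 *   <DEFAULT_WEIGHTS>
	 *   0	0.25
	 *   </DEFAULT_WEIGHTS>
	 *   <WEIGHTS SIZE="4">
	 *   <WEIGHT IDX="0">
	 *   <![CDATA[
	 *   WORD=said	1.5
	 *   ]]>
	 *   </WEIGHT>
	 *   ...
	 *   </WEIGHTS>
	 *   </TEMPLATE>
	 *   </CRF>
	 */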
// Convenient methods for constructing ACRFs
public static ACRF makeFactorial (Pipe p, int numLevels)
{
ArrayList t = new ArrayList ();
for (int i = 0; i < numLevels; i++) {
t.add (new BigramTemplate (i));
if (i+1 < numLevels)
t.add (new PairwiseFactorTemplate (i, i+1));
}
Template[] tmpls = (Template[]) t.toArray (new Template [t.size()]);
return new ACRF (p, tmpls);
}
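	/* Illustrative usage sketch ("pipe" and "training" are hypothetical names
	 * for a Pipe producing FeatureVectorSequence data with LabelsAssignment
	 * targets, and an InstanceList built from it):
	 *
	 *   ACRF acrf = ACRF.makeFactorial (pipe, 2); // two coupled label chains
	 *   new DefaultAcrfTrainer ().train (acrf, training, 100);
	 */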
// I hate serialization
private static final long serialVersionUID = 2865175696692468236L;//2113750667182393436L;
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
graphCache = new THashMap ();
}
transient private File verboseOutputDirectory = null;
public void setVerboseOutputDirectory (File dir) { verboseOutputDirectory = dir; }
} // ACRF
| 67,678 | 31.616386 | 184 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/ACRFEvaluator.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.learning;
import java.io.File;
import java.util.List;
import cc.mallet.grmm.util.GeneralUtils;
import cc.mallet.types.InstanceList;
import cc.mallet.util.FileUtils;
/**
*
* Created: Sun Jan 25 23:28:45 2004
*
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version $Id: ACRFEvaluator.java,v 1.1 2007/10/22 21:37:43 mccallum Exp $
*/
public abstract class ACRFEvaluator {
  // Evaluation settings
private int numIterToSkip = 10;
private int numIterToWait = 1;
public void setNumIterToSkip (int n) { numIterToSkip = n; }
public void setNumIterToWait (int n) { numIterToWait = n; }
/**
   * Evaluates the model in the middle of training.
   * @param acrf Model that is being trained.
   * @param iter How many iterations have been completed.
   * @param training Training set.
   * @param validation Validation set; may be null.
   * @param testing Testing set; may be null.
   * @return Whether to continue training. If the return is false, training should be stopped.
*/
public abstract boolean evaluate (ACRF acrf, int iter,
InstanceList training,
InstanceList validation,
InstanceList testing);
public abstract void test (InstanceList gold, List returned,
String description);
public void test (ACRF acrf, InstanceList data, String description)
{
List ret = acrf.getBestLabels (data);
test (data, ret, description);
}
private File outputPrefix;
public void setOutputPrefix (File file) { outputPrefix = file; }
protected File makeOutputFile ()
{
try {
String name = GeneralUtils.classShortName (this);
return FileUtils.uniqueFile (outputPrefix, name, ".log");
} catch (java.io.IOException e) {
throw new RuntimeException (e);
}
}
protected boolean shouldDoEvaluate (int iter)
{
if (iter < numIterToWait) {
return false;
} else {
return (numIterToSkip <= 0) || (iter % numIterToSkip == 0);
}
}
} // ACRFEvaluator
| 2,431 | 28.658537 | 94 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/AcrfSerialEvaluator.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.learning;
import java.util.List;
import java.util.Iterator;
import cc.mallet.grmm.learning.ACRF;
import cc.mallet.grmm.learning.ACRFEvaluator;
import cc.mallet.types.InstanceList;
/**
* Created: Aug 24, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: AcrfSerialEvaluator.java,v 1.1 2007/10/22 21:37:43 mccallum Exp $
*/
public class AcrfSerialEvaluator extends ACRFEvaluator {
private List evals;
public AcrfSerialEvaluator (List evals)
{
super();
this.evals = evals;
}
public boolean evaluate (ACRF acrf, int iter, InstanceList training, InstanceList validation, InstanceList testing)
{
boolean ret = true;
for (Iterator it = evals.iterator (); it.hasNext ();) {
ACRFEvaluator evaluator = (ACRFEvaluator) it.next ();
// Return false (i.e., stop training) if any sub-evaluator does.
ret = ret && evaluator.evaluate (acrf, iter, training, validation, testing);
}
return ret;
}
public void test (InstanceList gold, List returned, String description)
{
for (Iterator it = evals.iterator (); it.hasNext ();) {
ACRFEvaluator eval = (ACRFEvaluator) it.next ();
eval.test (gold, returned, description);
}
}
}
| 1,690 | 30.90566 | 117 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/DefaultAcrfTrainer.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.learning;
import gnu.trove.TIntArrayList;
import java.io.File;
import java.io.FileWriter;
import java.io.PrintWriter;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import java.util.logging.Logger;
import cc.mallet.grmm.learning.ACRF;
import cc.mallet.grmm.learning.ACRFEvaluator;
import cc.mallet.grmm.learning.ACRF.MaximizableACRF;
import cc.mallet.grmm.util.LabelsAssignment;
import cc.mallet.optimize.ConjugateGradient;
import cc.mallet.optimize.LimitedMemoryBFGS;
import cc.mallet.optimize.Optimizable;
import cc.mallet.optimize.Optimizer;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.Timing;
/**
* Class for training ACRFs.
* <p/>
* <p/>
* Created: Thu Oct 16 17:53:14 2003
*
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version $Id: DefaultAcrfTrainer.java,v 1.1 2007/10/22 21:37:43 mccallum Exp $
*/
public class DefaultAcrfTrainer implements ACRFTrainer {
private static Logger logger = MalletLogger.getLogger (DefaultAcrfTrainer.class.getName ());
private Optimizer maxer;
private static boolean rethrowExceptions = false;
public DefaultAcrfTrainer ()
{
} // ACRFTrainer constructor
private File outputPrefix = new File ("");
public void setOutputPrefix (File f)
{
outputPrefix = f;
}
public Optimizer getMaxer ()
{
return maxer;
}
public void setMaxer (Optimizer maxer)
{
this.maxer = maxer;
}
public static boolean isRethrowExceptions ()
{
return rethrowExceptions;
}
public static void setRethrowExceptions (boolean rethrowExceptions)
{
DefaultAcrfTrainer.rethrowExceptions = rethrowExceptions;
}
public boolean train (ACRF acrf, InstanceList training)
{
return train (acrf, training, null, null,
new LogEvaluator (), 1);
}
public boolean train (ACRF acrf, InstanceList training, int numIter)
{
return train (acrf, training, null, null,
new LogEvaluator (), numIter);
}
public boolean train (ACRF acrf, InstanceList training, ACRFEvaluator eval, int numIter)
{
return train (acrf, training, null, null, eval, numIter);
}
public boolean train (ACRF acrf,
InstanceList training,
InstanceList validation,
InstanceList testing,
int numIter)
{
return train (acrf, training, validation, testing,
new LogEvaluator (), numIter);
}
public boolean train (ACRF acrf,
InstanceList trainingList,
InstanceList validationList,
InstanceList testSet,
ACRFEvaluator eval,
int numIter)
{
Optimizable.ByGradientValue macrf = createMaximizable (acrf, trainingList);
return train (acrf, trainingList, validationList, testSet,
eval, numIter, macrf);
}
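  /* Illustrative call (variable names hypothetical): train with per-iteration
   * evaluation against held-out data, using the LogEvaluator defined below.
   *
   *   ACRFEvaluator eval = new DefaultAcrfTrainer.LogEvaluator ();
   *   new DefaultAcrfTrainer ().train (acrf, training, validation, testing, eval, 100);
   */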
protected Optimizable.ByGradientValue createMaximizable (ACRF acrf, InstanceList trainingList)
{
return acrf.getMaximizable (trainingList);
}
/*
public boolean threadedTrain (ACRF acrf,
InstanceList trainingList,
InstanceList validationList,
InstanceList testSet,
ACRFEvaluator eval,
int numIter)
{
Maximizable.ByGradient macrf = acrf.getThreadedMaximizable (trainingList);
return train (dcrf, trainingList, validationList, testSet,
eval, numIter, mdcrf);
}
*/
public boolean incrementalTrain (ACRF acrf,
InstanceList training,
InstanceList validation,
InstanceList testing,
int numIter)
{
return incrementalTrain (acrf, training, validation, testing,
new LogEvaluator (), numIter);
}
private static final double[] SIZE = new double[]{0.1, 0.5};
private static final int SUBSET_ITER = 10;
public boolean incrementalTrain (ACRF acrf,
InstanceList training,
InstanceList validation,
InstanceList testing,
ACRFEvaluator eval,
int numIter)
{
long stime = new Date ().getTime ();
for (int i = 0; i < SIZE.length; i++) {
InstanceList subset = training.split (new double[]
{SIZE[i], 1 - SIZE[i]})[0];
logger.info ("Training on subset of size " + subset.size ());
Optimizable.ByGradientValue subset_macrf = createMaximizable (acrf, subset);
train (acrf, training, validation, null, eval,
SUBSET_ITER, subset_macrf);
logger.info ("Subset training " + i + " finished...");
}
long etime = new Date ().getTime ();
logger.info ("All subset training finished. Time = " + (etime - stime) + " ms.");
return train (acrf, training, validation, testing, eval, numIter);
}
public boolean train (ACRF acrf,
InstanceList trainingList,
InstanceList validationList,
InstanceList testSet,
ACRFEvaluator eval,
int numIter,
Optimizable.ByGradientValue macrf)
{
Optimizer maximizer = createMaxer (macrf);
// Maximizer.ByGradient maximizer = new BoldDriver ();
// Maximizer.ByGradient maximizer = new GradientDescent ();
boolean converged = false;
boolean resetOnError = true;
long stime = System.currentTimeMillis ();
int numNodes = (macrf instanceof ACRF.MaximizableACRF) ? ((ACRF.MaximizableACRF) macrf).getTotalNodes () : 0;
double thresh = 1e-5 * numNodes; // "early" stopping (reasonably conservative)
if (testSet == null) {
logger.warning ("ACRF trainer: No test set provided.");
}
double prevValue = Double.NEGATIVE_INFINITY;
double currentValue;
int iter;
for (iter = 0; iter < numIter; iter++) {
long etime = new java.util.Date ().getTime ();
logger.info ("ACRF trainer iteration " + iter + " at time " + (etime - stime));
try {
converged = maximizer.optimize (1);
converged |= callEvaluator (acrf, trainingList, validationList, testSet, iter, eval);
if (converged) break;
resetOnError = true;
} catch (RuntimeException e) {
e.printStackTrace ();
// If we get a maximizing error, reset LBFGS memory and try
// again. If we get an error on the second try too, then just
// give up.
if (resetOnError) {
logger.warning ("Exception in iteration " + iter + ":" + e + "\n Resetting LBFGs and trying again...");
if (maximizer instanceof LimitedMemoryBFGS) ((LimitedMemoryBFGS) maximizer).reset ();
if (maximizer instanceof ConjugateGradient) ((ConjugateGradient) maximizer).reset ();
resetOnError = false;
} else {
logger.warning ("Exception in iteration " + iter + ":" + e + "\n Quitting and saying converged...");
converged = true;
if (rethrowExceptions) throw e;
break;
}
}
if (converged) break;
// "early" stopping
currentValue = macrf.getValue ();
if (Math.abs (currentValue - prevValue) < thresh) {
// ignore cutoff if we're about to reset L-BFGS
if (resetOnError) {
logger.info ("ACRFTrainer saying converged: " +
" Current value " + currentValue + ", previous " + prevValue +
"\n...threshold was " + thresh + " = 1e-5 * " + numNodes);
converged = true;
break;
}
} else {
prevValue = currentValue;
}
}
if (iter >= numIter) {
logger.info ("ACRFTrainer: Too many iterations, stopping training. maxIter = "+numIter);
}
long etime = System.currentTimeMillis ();
logger.info ("ACRF training time (ms) = " + (etime - stime));
if (macrf instanceof MaximizableACRF) {
((MaximizableACRF) macrf).report ();
}
if ((testSet != null) && (eval != null)) {
// don't cache test set
boolean oldCache = acrf.isCacheUnrolledGraphs ();
acrf.setCacheUnrolledGraphs (false);
eval.test (acrf, testSet, "Testing");
acrf.setCacheUnrolledGraphs (oldCache);
}
return converged;
}
private Optimizer createMaxer (Optimizable.ByGradientValue macrf)
{
if (maxer == null) {
return new LimitedMemoryBFGS (macrf);
} else return maxer;
}
/**
* @return true means stop, false means keep going (opposite of evaluators... ugh!)
*/
protected boolean callEvaluator (ACRF acrf, InstanceList trainingList, InstanceList validationList,
InstanceList testSet, int iter, ACRFEvaluator eval)
{
if (eval == null) return false; // If no evaluator specified, keep going blindly
eval.setOutputPrefix (outputPrefix);
// don't cache test set
boolean wasCached = acrf.isCacheUnrolledGraphs ();
acrf.setCacheUnrolledGraphs (false);
Timing timing = new Timing ();
if (!eval.evaluate (acrf, iter+1, trainingList, validationList, testSet)) {
logger.info ("ACRF trainer: evaluator returned false. Quitting.");
timing.tick ("Evaluation time (iteration "+iter+")");
return true;
}
timing.tick ("Evaluation time (iteration "+iter+")");
// set test set caching back to normal
acrf.setCacheUnrolledGraphs (wasCached);
return false;
}
public boolean someUnsupportedTrain (ACRF acrf,
InstanceList trainingList,
InstanceList validationList,
InstanceList testSet,
ACRFEvaluator eval,
int numIter)
{
Optimizable.ByGradientValue macrf = createMaximizable (acrf, trainingList);
train (acrf, trainingList, validationList, testSet, eval, 5, macrf);
ACRF.Template[] tmpls = acrf.getTemplates ();
for (int ti = 0; ti < tmpls.length; ti++)
tmpls[ti].addSomeUnsupportedWeights (trainingList);
logger.info ("Some unsupporetd weights initialized. Training...");
return train (acrf, trainingList, validationList, testSet, eval, numIter, macrf);
}
public void test (ACRF acrf, InstanceList testing, ACRFEvaluator eval)
{
test (acrf, testing, new ACRFEvaluator[]{eval});
}
public void test (ACRF acrf, InstanceList testing, ACRFEvaluator[] evals)
{
List pred = acrf.getBestLabels (testing);
for (int i = 0; i < evals.length; i++) {
evals[i].setOutputPrefix (outputPrefix);
evals[i].test (testing, pred, "Testing");
}
}
private static final Random r = new Random (1729);
public static Random getRandom ()
{
return r;
}
public void train (ACRF acrf, InstanceList training, InstanceList validation, InstanceList testing,
ACRFEvaluator eval, double[] proportions, int iterPerProportion)
{
for (int i = 0; i < proportions.length; i++) {
double proportion = proportions[i];
InstanceList[] lists = training.split (r, new double[]{proportion, 1.0});
logger.info ("ACRF trainer: Round " + i + ", training proportion = " + proportion);
train (acrf, lists[0], validation, testing, eval, iterPerProportion);
}
logger.info ("ACRF trainer: Training on full data");
train (acrf, training, validation, testing, eval, 99999);
}
public static class LogEvaluator extends ACRFEvaluator {
private TestResults lastResults;
public LogEvaluator ()
{
}
;
public boolean evaluate (ACRF acrf, int iter,
InstanceList training,
InstanceList validation,
InstanceList testing)
{
if (shouldDoEvaluate (iter)) {
if (training != null) { test (acrf, training, "Training"); }
if (testing != null) { test (acrf, testing, "Testing"); }
}
return true;
}
public void test (InstanceList testList, List returnedList,
String description)
{
logger.info (description+": Number of instances = " + testList.size ());
TestResults results = computeTestResults (testList, returnedList);
results.log (description);
lastResults = results;
// results.printConfusion ();
}
public static TestResults computeTestResults (InstanceList testList, List returnedList)
{
TestResults results = new TestResults (testList);
Iterator it1 = testList.iterator ();
Iterator it2 = returnedList.iterator ();
while (it1.hasNext ()) {
Instance inst = (Instance) it1.next ();
// System.out.println ("\n\nInstance");
LabelsAssignment lblseq = (LabelsAssignment) inst.getTarget ();
LabelsSequence target = lblseq.getLabelsSequence ();
LabelsSequence returned = (LabelsSequence) it2.next ();
// System.out.println (target);
compareLabelings (results, returned, target);
}
results.computeStatistics ();
return results;
}
static void compareLabelings (TestResults results,
LabelsSequence returned,
LabelsSequence target)
{
assert returned.size () == target.size ();
for (int i = 0; i < returned.size (); i++) {
// System.out.println ("Time "+i);
Labels lblsReturned = returned.getLabels (i);
Labels lblsTarget = target.getLabels (i);
results.incrementCount (lblsReturned, lblsTarget);
}
}
public double getJointAccuracy ()
{
return lastResults.getJointAccuracy ();
}
}
public static class FileEvaluator extends ACRFEvaluator {
private File file;
public FileEvaluator (File file)
{
this.file = file;
}
;
public boolean evaluate (ACRF acrf, int iter,
InstanceList training,
InstanceList validation,
InstanceList testing)
{
if (shouldDoEvaluate (iter)) {
test (acrf, testing, "Testing ");
}
return true;
}
public void test (InstanceList testList, List returnedList,
String description)
{
logger.info ("Number of testing instances = " + testList.size ());
TestResults results = LogEvaluator.computeTestResults (testList, returnedList);
try {
PrintWriter writer = new PrintWriter (new FileWriter (file, true));
results.print (description, writer);
writer.close ();
} catch (Exception e) {
e.printStackTrace ();
}
// results.printConfusion ();
}
}
public static class TestResults {
public int[][] confusion; // Confusion matrix
public int numClasses;
// Marginals of confusion matrix
public int[] trueCounts;
public int[] returnedCounts;
// Per-class precision, recall, and F1.
public double[] precision;
public double[] recall;
public double[] f1;
// Measuring accuracy of each factor
public TIntArrayList[] factors;
// Measuring joint accuracy
public int maxT = 0;
public int correctT = 0;
public Alphabet alphabet;
TestResults (InstanceList ilist)
{
this (ilist.get (0));
}
TestResults (Instance inst)
{
alphabet = new Alphabet ();
setupAlphabet (inst);
numClasses = alphabet.size ();
confusion = new int [numClasses][numClasses];
precision = new double [numClasses];
recall = new double [numClasses];
f1 = new double [numClasses];
}
// This isn't pretty, but I swear there's
// not an easy way...
private void setupAlphabet (Instance inst)
{
LabelsAssignment lblseq = (LabelsAssignment) inst.getTarget ();
factors = new TIntArrayList [lblseq.numSlices ()];
for (int i = 0; i < lblseq.numSlices (); i++) {
LabelAlphabet dict = lblseq.getOutputAlphabet (i);
factors[i] = new TIntArrayList (dict.size ());
for (int j = 0; j < dict.size (); j++) {
int idx = alphabet.lookupIndex (dict.lookupObject (j));
factors[i].add (idx);
}
}
}
void incrementCount (Labels lblsReturned, Labels lblsTarget)
{
boolean allSame = true;
// and per-label accuracy
for (int j = 0; j < lblsReturned.size (); j++) {
Label lret = lblsReturned.get (j);
Label ltarget = lblsTarget.get (j);
// System.out.println(ltarget+" vs. "+lret);
int idxTrue = alphabet.lookupIndex (ltarget.getEntry ());
int idxRet = alphabet.lookupIndex (lret.getEntry ());
if (idxTrue != idxRet) allSame = false;
confusion[idxTrue][idxRet]++;
}
// Measure joint accuracy
maxT++;
if (allSame) correctT++;
}
void computeStatistics ()
{
// Compute marginals of confusion matrix.
// Assumes that confusion[i][j] means true label i and
// returned label j
trueCounts = new int [numClasses];
returnedCounts = new int [numClasses];
for (int i = 0; i < numClasses; i++) {
for (int j = 0; j < numClasses; j++) {
trueCounts[i] += confusion[i][j];
returnedCounts[j] += confusion[i][j];
}
}
// Compute per-class precision, recall, and F1
for (int i = 0; i < numClasses; i++) {
double correct = confusion[i][i];
if (returnedCounts[i] == 0) {
precision[i] = (correct == 0) ? 1.0 : 0.0;
} else {
precision[i] = correct / returnedCounts[i];
}
if (trueCounts[i] == 0) {
recall[i] = 1.0;
} else {
recall[i] = correct / trueCounts[i];
}
f1[i] = (2 * precision[i] * recall[i]) / (precision[i] + recall[i]);
}
}
public void log ()
{
log ("");
}
public void log (String desc)
{
logger.info (desc+": i\tLabel\tN\tCorrect\tReturned\tP\tR\tF1");
for (int i = 0; i < numClasses; i++) {
logger.info (desc+": "+i + "\t" + alphabet.lookupObject (i) + "\t"
+ trueCounts[i] + "\t"
+ confusion[i][i] + "\t"
+ returnedCounts[i] + "\t"
+ precision[i] + "\t"
+ recall[i] + "\t"
+ f1[i] + "\t");
}
for (int fnum = 0; fnum < factors.length; fnum++) {
int correct = 0;
int returned = 0;
for (int i = 0; i < factors[fnum].size (); i++) {
int lbl = factors[fnum].get (i);
correct += confusion[lbl][lbl];
returned += returnedCounts[lbl];
}
logger.info (desc + ": Factor " + fnum + " accuracy: (" + correct + " " + returned + ") "
+ (correct / ((double) returned)));
}
logger.info (desc + " CorrectT " + correctT + " maxt " + maxT);
logger.info (desc + " Joint accuracy: " + ((double) correctT) / maxT);
}
public void print (String desc, PrintWriter out)
{
out.println ("i\tLabel\tN\tCorrect\tReturned\tP\tR\tF1");
for (int i = 0; i < numClasses; i++) {
out.println (i + "\t" + alphabet.lookupObject (i) + "\t"
+ trueCounts[i] + "\t"
+ confusion[i][i] + "\t"
+ returnedCounts[i] + "\t"
+ precision[i] + "\t"
+ recall[i] + "\t"
+ f1[i] + "\t");
}
for (int fnum = 0; fnum < factors.length; fnum++) {
int correct = 0;
int returned = 0;
for (int i = 0; i < factors[fnum].size (); i++) {
int lbl = factors[fnum].get (i);
correct += confusion[lbl][lbl];
returned += returnedCounts[lbl];
}
out.println (desc + " Factor " + fnum + " accuracy: (" + correct + " " + returned + ") "
+ (correct / ((double) returned)));
}
out.println (desc + " CorrectT " + correctT + " maxt " + maxT);
out.println (desc + " Joint accuracy: " + ((double) correctT) / maxT);
}
void printConfusion ()
{
System.out.println ("True\t\tReturned\tCount");
for (int i = 0; i < numClasses; i++) {
for (int j = 0; j < numClasses; j++) {
System.out.println (i + "\t\t" + j + "\t" + confusion[i][j]);
}
}
}
public double getJointAccuracy ()
{
return ((double) correctT) / maxT;
}
} // TestResults
} // ACRFTrainer
| 21,109 | 30.984848 | 114 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/GenericAcrfTui.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.learning;
/**
*
* Created: Aug 23, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: GenericAcrfTui.java,v 1.1 2007/10/22 21:37:43 mccallum Exp $
*/
import bsh.EvalError;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.logging.Logger;
import java.util.regex.Pattern;
import cc.mallet.grmm.inference.Inferencer;
import cc.mallet.pipe.*;
import cc.mallet.pipe.iterator.LineGroupIterator;
import cc.mallet.pipe.iterator.PipeInputIterator;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Instance;
import cc.mallet.util.*;
public class GenericAcrfTui {
private static CommandOption.File modelFile = new CommandOption.File
(GenericAcrfTui.class, "model-file", "FILENAME", true, null, "Text file describing model structure.", null);
private static CommandOption.File trainFile = new CommandOption.File
(GenericAcrfTui.class, "training", "FILENAME", true, null, "File containing training data.", null);
private static CommandOption.File testFile = new CommandOption.File
(GenericAcrfTui.class, "testing", "FILENAME", true, null, "File containing testing data.", null);
private static CommandOption.Integer numLabelsOption = new CommandOption.Integer
(GenericAcrfTui.class, "num-labels", "INT", true, -1,
"If supplied, number of labels on each line of input file." +
" Otherwise, the token ---- must separate labels from features.", null);
private static CommandOption.String inferencerOption = new CommandOption.String
(GenericAcrfTui.class, "inferencer", "STRING", true, "TRP",
"Specification of inferencer.", null);
private static CommandOption.String maxInferencerOption = new CommandOption.String
(GenericAcrfTui.class, "max-inferencer", "STRING", true, "TRP.createForMaxProduct()",
"Specification of inferencer.", null);
private static CommandOption.String evalOption = new CommandOption.String
(GenericAcrfTui.class, "eval", "STRING", true, "LOG",
"Evaluator to use. Java code grokking performed.", null);
static CommandOption.Boolean cacheUnrolledGraph = new CommandOption.Boolean
(GenericAcrfTui.class, "cache-graphs", "true|false", true, false,
"Whether to use memory-intensive caching.", null);
static CommandOption.Boolean useTokenText = new CommandOption.Boolean
(GenericAcrfTui.class, "use-token-text", "true|false", true, false,
"Set this to true if first feature in every list is should be considered the text of the " +
"current token. This is used for NLP-specific debugging and error analysis.", null);
static CommandOption.Integer randomSeedOption = new CommandOption.Integer
(GenericAcrfTui.class, "random-seed", "INTEGER", true, 0,
"The random seed for randomly selecting a proportion of the instance list for training", null);
private static BshInterpreter interpreter = setupInterpreter ();
public static void main (String[] args) throws IOException, EvalError
{
doProcessOptions (GenericAcrfTui.class, args);
Timing timing = new Timing ();
GenericAcrfData2TokenSequence basePipe;
if (!numLabelsOption.wasInvoked ()) {
basePipe = new GenericAcrfData2TokenSequence ();
} else {
basePipe = new GenericAcrfData2TokenSequence (numLabelsOption.value);
}
basePipe.setFeaturesIncludeToken(useTokenText.value);
basePipe.setIncludeTokenText(useTokenText.value);
Pipe pipe = new SerialPipes (new Pipe[] {
basePipe,
new TokenSequence2FeatureVectorSequence (true, true),
});
Iterator<Instance> trainSource = new LineGroupIterator (new FileReader (trainFile.value), Pattern.compile ("^\\s*$"), true);
Iterator<Instance> testSource;
if (testFile.wasInvoked ()) {
testSource = new LineGroupIterator (new FileReader (testFile.value), Pattern.compile ("^\\s*$"), true);
} else {
testSource = null;
}
InstanceList training = new InstanceList (pipe);
training.addThruPipe (trainSource);
InstanceList testing = new InstanceList (pipe);
testing.addThruPipe (testSource);
ACRF.Template[] tmpls = parseModelFile (modelFile.value);
ACRFEvaluator eval = createEvaluator (evalOption.value);
Inferencer inf = createInferencer (inferencerOption.value);
Inferencer maxInf = createInferencer (maxInferencerOption.value);
ACRF acrf = new ACRF (pipe, tmpls);
acrf.setInferencer (inf);
acrf.setViterbiInferencer (maxInf);
ACRFTrainer trainer = new DefaultAcrfTrainer ();
trainer.train (acrf, training, null, testing, eval, 9999);
timing.tick ("Training");
FileUtils.writeGzippedObject (new File ("acrf.ser.gz"), acrf);
timing.tick ("Serializing");
System.out.println ("Total time (ms) = " + timing.elapsedTime ());
}
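  /* Illustrative invocation (file names are hypothetical; options as defined
   * above):
   *
   *   java cc.mallet.grmm.learning.GenericAcrfTui \
   *     --model-file model.txt --training train.txt --testing test.txt \
   *     --inferencer TRP --max-inferencer "TRP.createForMaxProduct()" \
   *     --eval LOG --num-labels 2
   */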
private static BshInterpreter setupInterpreter ()
{
BshInterpreter interpreter = CommandOption.getInterpreter ();
try {
interpreter.eval ("import edu.umass.cs.mallet.base.extract.*");
interpreter.eval ("import edu.umass.cs.mallet.grmm.inference.*");
interpreter.eval ("import edu.umass.cs.mallet.grmm.learning.*");
interpreter.eval ("import edu.umass.cs.mallet.grmm.learning.templates.*");
} catch (EvalError e) {
throw new RuntimeException (e);
}
return interpreter;
}
public static ACRFEvaluator createEvaluator (String spec) throws EvalError
{
if (spec.indexOf ('(') >= 0) {
// assume it's Java code, and don't screw with it.
return (ACRFEvaluator) interpreter.eval (spec);
} else {
LinkedList toks = new LinkedList (Arrays.asList (spec.split ("\\s+")));
return createEvaluator (toks);
}
}
private static ACRFEvaluator createEvaluator (LinkedList toks)
{
String type = (String) toks.removeFirst ();
if (type.equalsIgnoreCase ("SEGMENT")) {
int slice = Integer.parseInt ((String) toks.removeFirst ());
if (toks.size() % 2 != 0)
throw new RuntimeException ("Error in --eval "+evalOption.value+": Every start tag must have a continue.");
int numTags = toks.size () / 2;
String[] startTags = new String [numTags];
String[] continueTags = new String [numTags];
for (int i = 0; i < numTags; i++) {
startTags[i] = (String) toks.removeFirst ();
continueTags[i] = (String) toks.removeFirst ();
}
return new MultiSegmentationEvaluatorACRF (startTags, continueTags, slice);
} else if (type.equalsIgnoreCase ("LOG")) {
return new DefaultAcrfTrainer.LogEvaluator ();
} else if (type.equalsIgnoreCase ("SERIAL")) {
List evals = new ArrayList ();
while (!toks.isEmpty ()) {
evals.add (createEvaluator (toks));
}
return new AcrfSerialEvaluator (evals);
} else {
throw new RuntimeException ("Error in --eval "+evalOption.value+": illegal evaluator "+type);
}
}
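  /* Examples of --eval specifications accepted above (label names are
   * illustrative); a spec containing '(' is instead evaluated directly as
   * Java code:
   *
   *   --eval LOG
   *   --eval "SEGMENT 0 B-PER I-PER B-LOC I-LOC"
   *   --eval "SERIAL LOG SEGMENT 0 B-PER I-PER"
   */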
private static Inferencer createInferencer (String spec) throws EvalError
{
String cmd;
if (spec.indexOf ('(') >= 0) {
// assume it's Java code, and don't screw with it.
cmd = spec;
} else {
cmd = "new "+spec+"()";
}
// Return whatever the Java code says to
Object inf = interpreter.eval (cmd);
if (inf instanceof Inferencer)
return (Inferencer) inf;
else throw new RuntimeException ("Don't know what to do with inferencer "+inf);
}
public static void doProcessOptions (Class childClass, String[] args)
{
CommandOption.List options = new CommandOption.List ("", new CommandOption[0]);
options.add (childClass);
options.process (args);
options.logOptions (Logger.getLogger (""));
}
private static ACRF.Template[] parseModelFile (File mdlFile) throws IOException, EvalError
{
BufferedReader in = new BufferedReader (new FileReader (mdlFile));
List tmpls = new ArrayList ();
String line = in.readLine ();
while (line != null) {
Object tmpl = interpreter.eval (line);
if (!(tmpl instanceof ACRF.Template)) {
throw new RuntimeException ("Error in "+mdlFile+" line "+in.toString ()+":\n Object "+tmpl+" not a template");
}
tmpls.add (tmpl);
line = in.readLine ();
}
return (ACRF.Template[]) tmpls.toArray (new ACRF.Template [0]);
}
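  /* Example model file contents (one Java expression per line, each
   * evaluating to an ACRF.Template via the interpreter; the templates shown
   * are illustrative):
   *
   *   new ACRF.BigramTemplate (0)
   *   new ACRF.BigramTemplate (1)
   *   new ACRF.PairwiseFactorTemplate (0, 1)
   */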
}
| 9,065 | 36.46281 | 128 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/GenericAcrfData2TokenSequence.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.learning;
import java.util.ArrayList;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.ObjectInputStream;
import cc.mallet.extract.StringSpan;
import cc.mallet.extract.StringTokenization;
import cc.mallet.grmm.util.LabelsAssignment;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.*;
/**
* Generic pipe that takes a linegroup of the form:
* <pre>
* LABEL1 LABEL2 ... LABELk word feature1 feature2 ... featuren
* </pre>
* and converts it into an input FeatureVectorSequence and target LabelsSequence.
* <p>
* If the number of labels at each sequence position could vary, then use this format instead:
* <pre>
* LABEL1 LABEL2 ... LABELk ---- word feature1 feature2 ... featuren
* </pre>
* The four dashes ---- must be there to separate the features from the labels.
* Whitespace is ignored.
* The difference between this pipe and {@link edu.umass.cs.iesl.casutton.experiments.dcrf.GenericDcrfPipe} is that this pipe
* allows for a different number of labels at each sequence position.
* <p>
* Explicitly specifying which word is the token allows the use of the HTML output from
* the extract package.
*
* Created: Aug 22, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: GenericAcrfData2TokenSequence.java,v 1.1 2007/10/22 21:37:43 mccallum Exp $
*/
public class GenericAcrfData2TokenSequence extends Pipe {
private ArrayList labelDicts = new ArrayList ();
private int numLabels = -1;
private boolean includeTokenText = true;
private String textFeaturePrefix = "WORD=";
private boolean featuresIncludeToken = true;
private boolean labelsAtEnd = false;
public GenericAcrfData2TokenSequence ()
{
super (new Alphabet(), new LabelAlphabet());
}
public GenericAcrfData2TokenSequence (int numLabels)
{
super (new Alphabet(), new LabelAlphabet());
this.numLabels = numLabels;
}
public void setIncludeTokenText (boolean includeTokenText)
{
this.includeTokenText = includeTokenText;
}
/**
* If true, then the first feature in the list is considered to be the token's text.
* If false, then no feature is designated as the token text.
* @param featuresIncludeToken
*/
public void setFeaturesIncludeToken (boolean featuresIncludeToken)
{
this.featuresIncludeToken = featuresIncludeToken;
}
public boolean getFeaturesIncludeToken ()
{
return featuresIncludeToken;
}
public void setTextFeaturePrefix (String textFeaturePrefix)
{
this.textFeaturePrefix = textFeaturePrefix;
}
public LabelAlphabet getLabelAlphabet (int lvl)
{
return (LabelAlphabet) labelDicts.get (lvl);
}
public int numLevels ()
{
return labelDicts.size();
}
public Instance pipe (Instance carrier)
{
String input;
if (carrier.getData () instanceof CharSequence) {
input = String.valueOf(carrier.getData ());
} else {
throw new ClassCastException("Needed a String; got "+carrier.getData());
}
String[] lines = input.split ("\n");
StringSpan[] spans = new StringSpan[lines.length];
Labels[] lbls = new Labels[lines.length];
StringBuffer buf = new StringBuffer ();
Alphabet dict = getDataAlphabet ();
for (int i = 0; i < lines.length; i++) {
String line = lines[i];
String[] toks = line.split ("\\s+");
int j = 0;
ArrayList thisLabels = new ArrayList ();
if (!labelsAtEnd) {
while (!isLabelSeparator (toks, j)) {
thisLabels.add (labelForTok (toks[j], j));
j++;
}
if ((j < toks.length) && toks[j].equals ("----")) j++;
lbls[i] = new Labels ((Label[]) thisLabels.toArray (new Label[thisLabels.size ()]));
}
int maxFeatureIdx = (labelsAtEnd) ? toks.length - numLabels : toks.length;
String text = "*???*";
if (featuresIncludeToken) {
if (j < maxFeatureIdx) {
text = toks [j++];
}
}
int start = buf.length ();
buf.append (text);
int end = buf.length ();
buf.append (" ");
StringSpan span = new StringSpan (buf, start, end);
while (j < maxFeatureIdx) {
span.setFeatureValue (toks[j].intern (), 1.0);
j++;
}
if (includeTokenText) {
span.setFeatureValue ((textFeaturePrefix+text).intern(), 1.0);
}
if (labelsAtEnd) {
int firstLblIdx = j;
while (j < toks.length) {
thisLabels.add (labelForTok (toks[j], j - firstLblIdx));
j++;
}
lbls[i] = new Labels ((Label[]) thisLabels.toArray (new Label[thisLabels.size ()]));
}
spans[i] = span;
}
StringTokenization tokenization = new StringTokenization (buf);
tokenization.addAll (spans);
carrier.setData (tokenization);
carrier.setTarget (new LabelsAssignment (new LabelsSequence (lbls)));
return carrier;
}
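  /* Example line group accepted by pipe() for a two-label task (i.e., the
   * pipe was constructed with new GenericAcrfData2TokenSequence (2)); the
   * tokens and features below are illustrative:
   *
   *   B-PER B-NP Clinton initCap suffix=ton
   *   O     B-VP said    lowercase suffix=aid
   *
   * Each line holds the labels, then the word itself (when
   * featuresIncludeToken is true), then any number of binary features.
   */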
private Label labelForTok (String tok, int lvl)
{
while (labelDicts.size() <= lvl) {
labelDicts.add (new LabelAlphabet ());
}
LabelAlphabet dict = (LabelAlphabet) labelDicts.get (lvl);
return dict.lookupLabel (tok);
}
private boolean isLabelSeparator (String[] toks, int j)
{
if (numLabels > 0) {
// if fixed numLabels, just return whether we have enough.
return j >= numLabels;
} else {
// otherwise, use the dynamic labels separator
return toks[j].equals ("----");
}
}
// Serialization garbage
// version 1.0 == returned a feature vector sequence
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 2;
private void writeObject (ObjectOutputStream out) throws IOException
{
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
int version = in.readInt ();
if (version <= 1) {
featuresIncludeToken = true;
}
}
public boolean isLabelsAtEnd ()
{
return labelsAtEnd;
}
public void setLabelsAtEnd (boolean labelsAtEnd)
{
this.labelsAtEnd = labelsAtEnd;
}
}
| 6,631 | 27.834783 | 125 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/MultiSegmentationEvaluatorACRF.java | /* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
Evaluate segmentation f1 for several different tags (marked in OIB format).
For example, tags might be B-PERSON I-PERSON O B-LOCATION I-LOCATION O...
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.grmm.learning; // Generated package name
import java.util.logging.*;
import java.text.DecimalFormat;
import java.util.List;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
public class MultiSegmentationEvaluatorACRF extends ACRFEvaluator
{
private static Logger logger = MalletLogger.getLogger(MultiSegmentationEvaluatorACRF.class.getName());
// equals() is called on these objects to determine if this token is the start or continuation of a segment.
  // A tag not equal to any of these is an "other" (i.e., not part of any segment).
Object[] segmentStartTags;
Object[] segmentContinueTags;
Object[] segmentStartOrContinueTags;
private int evalIterations = 0;
private int slice = 0;
/** This class WILL NOT WORK if segmentStartTags and segmentContinueTags are the same!! */
public MultiSegmentationEvaluatorACRF (Object[] segmentStartTags, Object[] segmentContinueTags, boolean showViterbi)
{
this.segmentStartTags = segmentStartTags;
this.segmentContinueTags = segmentContinueTags;
assert (segmentStartTags.length == segmentContinueTags.length);
}
/** This class WILL NOT WORK if segmentStartTags and segmentContinueTags are the same!! */
public MultiSegmentationEvaluatorACRF (Object[] segmentStartTags, Object[] segmentContinueTags)
{
this(segmentStartTags, segmentContinueTags, true);
}
public MultiSegmentationEvaluatorACRF (Object[] segmentStartTags, Object[] segmentContinueTags, int slice)
{
this(segmentStartTags, segmentContinueTags, true);
this.slice = slice;
}
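	/* Illustrative construction: evaluate IOB-style segments on label factor 0
	 * (the tag names are hypothetical):
	 *
	 *   new MultiSegmentationEvaluatorACRF (
	 *       new String[] {"B-PER", "B-LOC"}, new String[] {"I-PER", "I-LOC"}, 0);
	 */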
private LabelSequence slice (LabelsSequence lseq, int k)
{
Label[] arr = new Label [lseq.size()];
for (int i = 0; i < lseq.size(); i++) {
arr [i] = lseq.getLabels (i).get (k);
}
return new LabelSequence (arr);
}
public boolean evaluate (ACRF acrf, int iter,
InstanceList training,
InstanceList validation,
InstanceList testing)
{
// Don't evaluate if it is too early in training to matter
if (!shouldDoEvaluate (iter)) return true;
InstanceList[] lists = new InstanceList[] {training, validation, testing};
String[] listnames = new String[] {"Training", "Validation", "Testing"};
for (int k = 0; k < lists.length; k++)
if (lists[k] != null) {
test (acrf, lists[k], listnames[k]);
}
return true;
}
public void test(InstanceList gold, List returned, String description)
{
TestResults results = new TestResults (segmentStartTags, segmentContinueTags);
for (int i = 0; i < gold.size(); i++) {
Instance instance = gold.get(i);
Sequence trueOutput = processTrueOutput ((Sequence) instance.getTarget());
Sequence predOutput = slice ((LabelsSequence) returned.get (i), slice);
assert (predOutput.size() == trueOutput.size());
results.incrementCounts (trueOutput, predOutput);
}
results.logResults (description);
}
private Sequence processTrueOutput (Sequence sequence)
{
if (sequence instanceof LabelsSequence) {
LabelsSequence lseq = (LabelsSequence) sequence;
return slice (lseq, slice);
} else {
return sequence;
}
}
public static class TestResults
{
private Object[] segmentStartTags, segmentContinueTags;
private int numCorrectTokens, totalTokens;
private int[] numTrueSegments, numPredictedSegments, numCorrectSegments;
private int allIndex;
public TestResults (Object[] segmentStartTags, Object[] segmentContinueTags)
{
this.segmentStartTags = segmentStartTags;
this.segmentContinueTags = segmentContinueTags;
allIndex = segmentStartTags.length;
numTrueSegments = new int[allIndex+1];
numPredictedSegments = new int[allIndex+1];
numCorrectSegments = new int[allIndex+1];
TokenSequence sourceTokenSequence = null;
totalTokens = numCorrectTokens = 0;
for (int n = 0; n < numTrueSegments.length; n++)
numTrueSegments[n] = numPredictedSegments[n] = numCorrectSegments[n] = 0;
}
public void logResults (String description)
{
DecimalFormat f = new DecimalFormat ("0.####");
logger.info (description +" tokenaccuracy="+f.format(((double)numCorrectTokens)/totalTokens));
for (int n = 0; n < numCorrectSegments.length; n++) {
logger.info ((n < allIndex ? segmentStartTags[n].toString() : "OVERALL") +' ');
double precision = numPredictedSegments[n] == 0 ? 1 : ((double)numCorrectSegments[n]) / numPredictedSegments[n];
double recall = numTrueSegments[n] == 0 ? 1 : ((double)numCorrectSegments[n]) / numTrueSegments[n];
double f1 = recall+precision == 0.0 ? 0.0 : (2.0 * recall * precision) / (recall + precision);
logger.info (" segments true="+numTrueSegments[n]+" pred="+numPredictedSegments[n]+" correct="+numCorrectSegments[n]+
" misses="+(numTrueSegments[n]-numCorrectSegments[n])+" alarms="+(numPredictedSegments[n]-numCorrectSegments[n]));
logger.info (" precision="+f.format(precision)+" recall="+f.format(recall)+" f1="+f.format(f1));
}
}
public void incrementCounts (Sequence trueOutput, Sequence predOutput)
{
int trueStart, predStart; // -1 for non-start, otherwise index into segmentStartTag
for (int j = 0; j < trueOutput.size(); j++) {
totalTokens++;
String trueToken = trueOutput.get(j).toString ();
String predToken = predOutput.get(j).toString ();
if (trueToken.equals (predToken)) {
numCorrectTokens++;
}
trueStart = predStart = -1;
// Count true segment starts
for (int n = 0; n < segmentStartTags.length; n++) {
if (segmentStartTags[n].equals(trueToken)) {
numTrueSegments[n]++;
numTrueSegments[allIndex]++;
trueStart = n;
break;
}
}
// Count predicted segment starts
for (int n = 0; n < segmentStartTags.length; n++) {
if (segmentStartTags[n].equals(predOutput.get(j))) {
numPredictedSegments[n]++;
numPredictedSegments[allIndex]++;
predStart = n;
}
}
if (trueStart != -1 && trueStart == predStart) {
// Truth and Prediction both agree that the same segment tag-type is starting now
int m;
boolean trueContinue = false;
boolean predContinue = false;
for (m = j+1; m < trueOutput.size(); m++) {
String trueTokenCtd = trueOutput.get (m).toString ();
String predTokenCtd = predOutput.get (m).toString ();
trueContinue = segmentContinueTags[predStart].equals (trueTokenCtd);
predContinue = segmentContinueTags[predStart].equals (predTokenCtd);
if (!trueContinue || !predContinue) {
if (trueContinue == predContinue) {
// They agree about a segment is ending somehow
numCorrectSegments[predStart]++;
numCorrectSegments[allIndex]++;
}
break;
}
}
// for the case of the end of the sequence
if (m == trueOutput.size()) {
if (trueContinue == predContinue) {
numCorrectSegments[predStart]++;
numCorrectSegments[allIndex]++;
}
}
}
}
}
}
}
| 7,947 | 35.458716 | 135 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/templates/SimilarTokensTemplate.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.learning.templates;
import gnu.trove.THashMap;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.Iterator;
import java.util.List;
import java.util.regex.Pattern;
import java.util.regex.Matcher;
import cc.mallet.grmm.learning.ACRF;
import cc.mallet.grmm.types.Variable;
import cc.mallet.grmm.util.LabelsAssignment;
import cc.mallet.grmm.util.THashMultiMap;
import cc.mallet.types.*;
/**
* Template for adding "skip edges" as in
*
* @author Charles Sutton
* @version $Id: SimilarTokensTemplate.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $
*/
// Copied from TUIacrf
public class SimilarTokensTemplate extends ACRF.SequenceTemplate {
private static final boolean debug = false;
private static class TokenInfo {
String featureName;
FeatureVector fv;
int pos;
public TokenInfo (String featureName, FeatureVector fv, int pos)
{
this.featureName = featureName;
this.fv = fv;
this.pos = pos;
}
}
private int factor;
private boolean distinguishEndpts = false;
private boolean wordFeaturesOnly = false;
private boolean excludeAdjacent = true;
private FeatureVectorBinner binner;
// Maps FeatureVectorSequence ==> THashMultiMap<String,TokenInfo>
private transient THashMap instanceCache = new THashMap ();
public SimilarTokensTemplate (int factor)
{
this (factor, false);
}
public SimilarTokensTemplate (int factor, boolean distinguishEndpoints)
{
this (factor, distinguishEndpoints, false, new CapWordsBinner ());
}
public SimilarTokensTemplate (int factor, boolean distinguishEndpoints, boolean wordFeaturesOnly)
{
this (factor, distinguishEndpoints, wordFeaturesOnly, new CapWordsBinner ());
}
public SimilarTokensTemplate (int factor, boolean distinguishEndpoints, FeatureVectorBinner binner)
{
this (factor, distinguishEndpoints, false, binner);
}
public SimilarTokensTemplate (int factor, boolean distinguishEndpoints, boolean wordFeaturesOnly, FeatureVectorBinner binner)
{
this.factor = factor;
this.distinguishEndpts = distinguishEndpoints;
this.wordFeaturesOnly = wordFeaturesOnly;
this.binner = binner;
}
public void addInstantiatedCliques (ACRF.UnrolledGraph graph,
FeatureVectorSequence fvs,
LabelsAssignment lblseq)
{
THashMultiMap fvByWord = constructFvByWord (fvs);
int numSkip = 0;
for (Iterator it = fvByWord.keySet ().iterator (); it.hasNext ();) {
String wordFeature = (String) it.next ();
List infoList = (List) fvByWord.get (wordFeature);
int N = infoList.size ();
if (debug && N > 1) System.err.print ("Processing list of size "+N+" ("+wordFeature+")");
for (int i = 0; i < N; i++) {
for (int j = i + 1; j < N; j++) {
TokenInfo info1 = (TokenInfo) infoList.get (i);
TokenInfo info2 = (TokenInfo) infoList.get (j);
Variable v1 = lblseq.varOfIndex (info1.pos, factor);
Variable v2 = lblseq.varOfIndex (info2.pos, factor);
if (excludeAdjacent && (Math.abs(info1.pos - info2.pos) <= 1)) continue;
Variable[] vars = new Variable[]{v1, v2};
assert v1 != null : "Couldn't get label factor " + factor + " time " + i;
assert v2 != null : "Couldn't get label factor " + factor + " time " + j;
FeatureVector fv = combineFv (wordFeature, info1.fv, info2.fv);
ACRF.UnrolledVarSet clique = new ACRF.UnrolledVarSet (graph, this, vars, fv);
graph.addClique (clique);
numSkip++;
// System.out.println ("Adding "+info1.pos+" --- "+info2.pos);
/* Insanely verbose
if (debug) {
System.err.println ("Combining:\n "+info1.fv+"\n "+info2.fv);
}
*/
}
}
if (debug && N > 1) System.err.println ("...done.");
}
System.err.println ("SimilarTokensTemplate: Total skip edges = "+numSkip);
}
private THashMultiMap constructFvByWord (FeatureVectorSequence fvs)
{
THashMultiMap fvByWord = new THashMultiMap (fvs.size ());
int N = fvs.size ();
for (int t = 0; t < N; t++) {
FeatureVector fv = fvs.getFeatureVector (t);
String wordFeature = binner.computeBin (fv);
if (wordFeature != null) { // could happen if the current word has been excluded
fvByWord.put (wordFeature, new TokenInfo (wordFeature, fv, t));
}
}
return fvByWord;
}
private FeatureVector combineFv (String word, FeatureVector fv1, FeatureVector fv2)
{
// System.out.println("combineFv:");
// System.out.println("FV1 values "+fv1.getValues()+" indices "+fv1.getIndices());
// System.out.println("FV1: "+fv1.toString (true));
// System.out.println("FV2 values "+fv2.getValues()+" indices "+fv2.getIndices());
// System.out.println("FV2:"+fv2.toString (true));
Alphabet dict = fv1.getAlphabet ();
AugmentableFeatureVector afv = new AugmentableFeatureVector (dict, true);
if (wordFeaturesOnly) {
int idx = dict.lookupIndex (word);
afv.add (idx, 1.0);
} else if (distinguishEndpts) {
afv.add (fv1, "S:");
afv.add (fv2, "E:");
} else {
afv.add (fv1);
afv.add (fv2);
}
// System.out.println("AFV: "+afv.toString (true));
return afv;
}
// Customization
  /** Interface for classes that assign each feature vector to a String-valued bin.
   *  Feature vectors in the same bin are assumed to be similar, so that they are joined by a skip edge.
   *  In this way the similarity metric used for generating skip edges can be completely customized;
   *  an example binner sketch appears below the interface declaration.
*/
public static interface FeatureVectorBinner {
String computeBin (FeatureVector fv);
}
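  // --- Example (illustrative sketch, not part of the original API) ---------------------
  // A hypothetical binner that groups tokens by the value of a feature with a given
  // prefix.  The "LEMMA=" prefix used in the usage line is an assumption about the
  // feature naming convention, not something this template requires.  Only FeatureVector
  // methods already used elsewhere in this file are relied upon.
  //
  //   public static class PrefixBinner implements FeatureVectorBinner, Serializable {
  //     private final String prefix;
  //     public PrefixBinner (String prefix) { this.prefix = prefix; }
  //     public String computeBin (FeatureVector fv) {
  //       Alphabet dict = fv.getAlphabet ();
  //       for (int loc = 0; loc < fv.numLocations (); loc++) {
  //         String fname = String.valueOf (dict.lookupObject (fv.indexAtLocation (loc)));
  //         if (fname.startsWith (prefix)) {
  //           return fname.substring (prefix.length ());   // bin = the feature's value
  //         }
  //       }
  //       return null;   // no such feature: this token receives no skip edges
  //     }
  //   }
  //
  // Usage: new SimilarTokensTemplate (0, false, false, new PrefixBinner ("LEMMA="));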
public static class WordFeatureBinner implements FeatureVectorBinner, Serializable {
private Pattern findWordPtn1 = Pattern.compile("WORD=(.*)");
private Pattern findWordPtn2 = Pattern.compile("W=(.*)");
private Pattern findWordExcludePtn = Pattern.compile (".*(?:@-?\\d+|_&_).*");
private Pattern wordIncludePattern = null;
public WordFeatureBinner () { }
public WordFeatureBinner (Pattern wordIncludePattern)
{
this.wordIncludePattern = wordIncludePattern;
}
public String computeBin (FeatureVector fv)
{
String text = intuitTokenText (fv);
if (text != null) {
if (wordIncludePattern == null || wordIncludePattern.matcher(text).matches ()) {
return text;
}
}
return null;
}
private String intuitTokenText (FeatureVector fv)
{
Alphabet dict = fv.getAlphabet ();
for (int loc = 0; loc < fv.numLocations (); loc++) {
int idx = fv.indexAtLocation (loc);
String fname = String.valueOf (dict.lookupObject (idx));
Matcher matcher;
if ((matcher = findWordPtn1.matcher (fname)).matches ()) {
if (!findWordExcludePtn.matcher (fname).matches ()) {
return matcher.group (1);
}
} else if ((findWordPtn2 != null) && (matcher = findWordPtn2.matcher (fname)).matches ()) {
if (!findWordExcludePtn.matcher (fname).matches ()) {
return matcher.group (1);
}
}
}
return null;
}
// Serialization garbage
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 2;
private void writeObject (ObjectOutputStream out) throws IOException
{
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
int version = in.readInt ();
if (version == 1) {
throw new RuntimeException ();
}
}
}
public static class CapWordsBinner extends WordFeatureBinner {
public CapWordsBinner ()
{
super (Pattern.compile ("[A-Z][A-Za-z]*"));
}
}
public void setBinner (FeatureVectorBinner binner)
{
this.binner = binner;
}
public boolean isExcludeAdjacent ()
{
return excludeAdjacent;
}
public void setExcludeAdjacent (boolean excludeAdjacent)
{
this.excludeAdjacent = excludeAdjacent;
}
public boolean isDistinguishEndpts ()
{
return distinguishEndpts;
}
public void setDistinguishEndpts (boolean distinguishEndpts)
{
this.distinguishEndpts = distinguishEndpts;
}
// Serialization garbage
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 2;
private void writeObject (ObjectOutputStream out) throws IOException
{
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
int version = in.readInt ();
instanceCache = new THashMap ();
}
}
| 9,479 | 29.095238 | 127 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/extract/AcrfExtractorTui.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.learning.extract;
/**
*
* Created: Aug 23, 2005
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: AcrfExtractorTui.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $
*/
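/*
 * Example (hypothetical invocation; the file names and tag names below are assumptions,
 * not resources shipped with MALLET).  The same strings could be given on the command
 * line; note that main() declares IOException and bsh.EvalError.
 *
 *   String[] args = {
 *     "--model-file",    "model.txt",       // one ACRF.Template expression per line
 *     "--training",      "train.txt",
 *     "--testing",       "test.txt",
 *     "--output-prefix", "out",
 *     "--eval",          "SEGMENT 0 B-PER I-PER B-LOC I-LOC",   // slice, then start/continue tag pairs
 *   };
 *   AcrfExtractorTui.main (args);
 */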
import bsh.EvalError;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.logging.Logger;
import java.util.regex.Pattern;
import cc.mallet.extract.Extraction;
import cc.mallet.extract.ExtractionEvaluator;
import cc.mallet.grmm.inference.Inferencer;
import cc.mallet.grmm.learning.*;
import cc.mallet.pipe.*;
import cc.mallet.pipe.iterator.FileListIterator;
import cc.mallet.pipe.iterator.LineGroupIterator;
import cc.mallet.pipe.iterator.PipeInputIterator;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.util.*;
public class AcrfExtractorTui {
private static final Logger logger = MalletLogger.getLogger (AcrfExtractorTui.class.getName ());
private static CommandOption.File outputPrefix = new CommandOption.File
(AcrfExtractorTui.class, "output-prefix", "FILENAME", true, null,
"Directory to write saved model to.", null);
private static CommandOption.File modelFile = new CommandOption.File
(AcrfExtractorTui.class, "model-file", "FILENAME", true, null, "Text file describing model structure.", null);
private static CommandOption.File trainFile = new CommandOption.File
(AcrfExtractorTui.class, "training", "FILENAME", true, null, "File containing training data.", null);
private static CommandOption.File testFile = new CommandOption.File
(AcrfExtractorTui.class, "testing", "FILENAME", true, null, "File containing testing data.", null);
private static CommandOption.Integer numLabelsOption = new CommandOption.Integer
(AcrfExtractorTui.class, "num-labels", "INT", true, -1,
"If supplied, number of labels on each line of input file." +
" Otherwise, the token ---- must separate labels from features.", null);
private static CommandOption.String trainerOption = new CommandOption.String
(AcrfExtractorTui.class, "trainer", "STRING", true, "ACRFExtractorTrainer",
"Specification of trainer type.", null);
private static CommandOption.String inferencerOption = new CommandOption.String
(AcrfExtractorTui.class, "inferencer", "STRING", true, "LoopyBP",
"Specification of inferencer.", null);
private static CommandOption.String maxInferencerOption = new CommandOption.String
(AcrfExtractorTui.class, "max-inferencer", "STRING", true, "LoopyBP.createForMaxProduct()",
"Specification of inferencer.", null);
private static CommandOption.String evalOption = new CommandOption.String
(AcrfExtractorTui.class, "eval", "STRING", true, "LOG",
"Evaluator to use. Java code grokking performed.", null);
private static CommandOption.String extractionEvalOption = new CommandOption.String
(AcrfExtractorTui.class, "extraction-eval", "STRING", true, "PerDocumentF1",
"Evaluator to use. Java code grokking performed.", null);
private static CommandOption.Integer checkpointIterations = new CommandOption.Integer
(AcrfExtractorTui.class, "checkpoint", "INT", true, -1, "Save a copy after every ___ iterations.", null);
static CommandOption.Boolean cacheUnrolledGraph = new CommandOption.Boolean
(AcrfExtractorTui.class, "cache-graphs", "true|false", true, true,
"Whether to use memory-intensive caching.", null);
static CommandOption.Boolean perTemplateTrain = new CommandOption.Boolean
(AcrfExtractorTui.class, "per-template-train", "true|false", true, false,
"Whether to pretrain templates before joint training.", null);
static CommandOption.Integer pttIterations = new CommandOption.Integer
(AcrfExtractorTui.class, "per-template-iterations", "INTEGER", false, 100,
"How many training iterations for each step of per-template-training.", null);
static CommandOption.Integer randomSeedOption = new CommandOption.Integer
(AcrfExtractorTui.class, "random-seed", "INTEGER", true, 0,
"The random seed for randomly selecting a proportion of the instance list for training", null);
static CommandOption.Boolean useTokenText = new CommandOption.Boolean
(AcrfExtractorTui.class, "use-token-text", "true|false", true, true,
"If true, first feature in list is assumed to be token identity, and is treated specially.", null);
private static CommandOption.Boolean labelsAtEnd = new CommandOption.Boolean
(AcrfExtractorTui.class, "labels-at-end", "INT", true, false,
"If true, then label is at end of each line, rather than beginning.", null);
static CommandOption.Boolean trainingIsList = new CommandOption.Boolean
(AcrfExtractorTui.class, "training-is-list", "true|false", true, false,
"If true, training option gives list of files to read for training.", null);
private static CommandOption.File dataDir = new CommandOption.File
(AcrfExtractorTui.class, "data-dir", "FILENAME", true, null, "If training-is-list, base directory in which training files located.", null);
private static BshInterpreter interpreter = setupInterpreter ();
public static void main (String[] args) throws IOException, EvalError
{
doProcessOptions (AcrfExtractorTui.class, args);
Timing timing = new Timing ();
GenericAcrfData2TokenSequence basePipe;
if (!numLabelsOption.wasInvoked ()) {
basePipe = new GenericAcrfData2TokenSequence ();
} else {
basePipe = new GenericAcrfData2TokenSequence (numLabelsOption.value);
}
if (!useTokenText.value) {
basePipe.setFeaturesIncludeToken(false);
basePipe.setIncludeTokenText(false);
}
basePipe.setLabelsAtEnd (labelsAtEnd.value);
Pipe tokPipe = new SerialPipes (new Pipe[] {
(trainingIsList.value ? new Input2CharSequence () : (Pipe) new Noop ()),
basePipe,
});
Iterator<Instance> trainSource = constructIterator(trainFile.value, dataDir.value, trainingIsList.value);
Iterator<Instance> testSource;
if (testFile.wasInvoked ()) {
testSource = constructIterator (testFile.value, dataDir.value, trainingIsList.value);
} else {
testSource = null;
}
ACRF.Template[] tmpls = parseModelFile (modelFile.value);
ACRFExtractorTrainer trainer = createTrainer (trainerOption.value);
ACRFEvaluator eval = createEvaluator (evalOption.value);
ExtractionEvaluator extractionEval = createExtractionEvaluator (extractionEvalOption.value);
Inferencer inf = createInferencer (inferencerOption.value);
Inferencer maxInf = createInferencer (maxInferencerOption.value);
trainer.setPipes (tokPipe, new TokenSequence2FeatureVectorSequence ())
.setDataSource (trainSource, testSource)
.setEvaluator (eval)
.setTemplates (tmpls)
.setInferencer (inf)
.setViterbiInferencer (maxInf)
.setCheckpointDirectory (outputPrefix.value)
.setNumCheckpointIterations (checkpointIterations.value)
.setCacheUnrolledGraphs (cacheUnrolledGraph.value)
.setUsePerTemplateTrain (perTemplateTrain.value)
.setPerTemplateIterations (pttIterations.value);
logger.info ("Starting training...");
ACRFExtractor extor = trainer.trainExtractor ();
timing.tick ("Training");
FileUtils.writeGzippedObject (new File (outputPrefix.value, "extor.ser.gz"), extor);
timing.tick ("Serializing");
InstanceList testing = trainer.getTestingData ();
if (testing != null) {
eval.test (extor.getAcrf (), testing, "Final results");
}
if ((extractionEval != null) && (testing != null)) {
Extraction extraction = extor.extract (testing);
extractionEval.evaluate (extraction);
timing.tick ("Evaluting");
}
System.out.println ("Total time (ms) = " + timing.elapsedTime ());
}
private static BshInterpreter setupInterpreter ()
{
BshInterpreter interpreter = CommandOption.getInterpreter ();
try {
interpreter.eval ("import edu.umass.cs.mallet.base.extract.*");
interpreter.eval ("import edu.umass.cs.mallet.grmm.inference.*");
interpreter.eval ("import edu.umass.cs.mallet.grmm.learning.*");
interpreter.eval ("import edu.umass.cs.mallet.grmm.learning.templates.*");
interpreter.eval ("import edu.umass.cs.mallet.grmm.learning.extract.*");
} catch (EvalError e) {
throw new RuntimeException (e);
}
return interpreter;
}
private static Iterator<Instance> constructIterator (File trainFile, File dataDir, boolean isList) throws IOException
{
if (isList) {
return new FileListIterator (trainFile, dataDir, null, null, true);
} else {
return new LineGroupIterator (new FileReader (trainFile), Pattern.compile ("^\\s*$"), true);
}
}
public static ACRFEvaluator createEvaluator (String spec) throws EvalError
{
if (spec.indexOf ('(') >= 0) {
// assume it's Java code, and don't screw with it.
return (ACRFEvaluator) interpreter.eval (spec);
} else {
LinkedList toks = new LinkedList (Arrays.asList (spec.split ("\\s+")));
return createEvaluator (toks);
}
}
private static ExtractionEvaluator createExtractionEvaluator (String spec) throws EvalError
{
if (spec.indexOf ('(') >= 0) {
// assume it's Java code, and don't screw with it.
return (ExtractionEvaluator) interpreter.eval (spec);
} else {
spec = "new "+spec+"Evaluator ()";
return (ExtractionEvaluator) interpreter.eval (spec);
}
}
private static ACRFEvaluator createEvaluator (LinkedList toks)
{
String type = (String) toks.removeFirst ();
if (type.equalsIgnoreCase ("SEGMENT")) {
int slice = Integer.parseInt ((String) toks.removeFirst ());
if (toks.size() % 2 != 0)
throw new RuntimeException ("Error in --eval "+evalOption.value+": Every start tag must have a continue.");
int numTags = toks.size () / 2;
String[] startTags = new String [numTags];
String[] continueTags = new String [numTags];
for (int i = 0; i < numTags; i++) {
startTags[i] = (String) toks.removeFirst ();
continueTags[i] = (String) toks.removeFirst ();
}
return new MultiSegmentationEvaluatorACRF (startTags, continueTags, slice);
} else if (type.equalsIgnoreCase ("LOG")) {
return new DefaultAcrfTrainer.LogEvaluator ();
} else if (type.equalsIgnoreCase ("SERIAL")) {
List evals = new ArrayList ();
while (!toks.isEmpty ()) {
evals.add (createEvaluator (toks));
}
return new AcrfSerialEvaluator (evals);
} else {
throw new RuntimeException ("Error in --eval "+evalOption.value+": illegal evaluator "+type);
}
}
private static ACRFExtractorTrainer createTrainer (String spec) throws EvalError
{
String cmd;
if (spec.indexOf ('(') >= 0) {
// assume it's Java code, and don't screw with it.
cmd = spec;
} else if (spec.endsWith ("Trainer")) {
cmd = "new "+spec+"()";
} else {
cmd = "new "+spec+"Trainer()";
}
// Return whatever the Java code says to
Object trainer = interpreter.eval (cmd);
if (trainer instanceof ACRFExtractorTrainer)
return (ACRFExtractorTrainer) trainer;
else if (trainer instanceof DefaultAcrfTrainer)
return new ACRFExtractorTrainer ().setTrainingMethod ((ACRFTrainer) trainer);
else throw new RuntimeException ("Don't know what to do with trainer "+trainer);
}
private static Inferencer createInferencer (String spec) throws EvalError
{
String cmd;
if (spec.indexOf ('(') >= 0) {
// assume it's Java code, and don't screw with it.
cmd = spec;
} else {
cmd = "new "+spec+"()";
}
// Return whatever the Java code says to
Object inf = interpreter.eval (cmd);
if (inf instanceof Inferencer)
return (Inferencer) inf;
else throw new RuntimeException ("Don't know what to do with inferencer "+inf);
}
public static void doProcessOptions (Class childClass, String[] args)
{
CommandOption.List options = new CommandOption.List ("", new CommandOption[0]);
options.add (childClass);
options.process (args);
options.logOptions (Logger.getLogger (""));
}
private static ACRF.Template[] parseModelFile (File mdlFile) throws IOException, EvalError
{
BufferedReader in = new BufferedReader (new FileReader (mdlFile));
List tmpls = new ArrayList ();
String line = in.readLine ();
while (line != null) {
Object tmpl = interpreter.eval (line);
if (!(tmpl instanceof ACRF.Template)) {
throw new RuntimeException ("Error in "+mdlFile+" line "+in.toString ()+":\n Object "+tmpl+" not a template");
}
tmpls.add (tmpl);
line = in.readLine ();
}
return (ACRF.Template[]) tmpls.toArray (new ACRF.Template [0]);
}
}
| 13,734 | 38.582133 | 149 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/extract/ACRFExtractorTrainer.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.learning.extract;
import java.util.Iterator;
import java.util.Random;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.logging.Logger;
import java.io.File;
import cc.mallet.extract.Extraction;
import cc.mallet.extract.TokenizationFilter;
import cc.mallet.grmm.inference.Inferencer;
import cc.mallet.grmm.learning.*;
import cc.mallet.grmm.util.RememberTokenizationPipe;
import cc.mallet.grmm.util.PipedIterator;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.PipeUtils;
import cc.mallet.pipe.iterator.PipeInputIterator;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Instance;
import cc.mallet.util.CollectionUtils;
import cc.mallet.util.FileUtils;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.Timing;
/**
* Created: Mar 31, 2005
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: ACRFExtractorTrainer.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $
*/
public class ACRFExtractorTrainer {
private static final Logger logger = MalletLogger.getLogger (ACRFExtractorTrainer.class.getName());
private int numIter = 99999;
protected ACRF.Template[] tmpls;
protected InstanceList training;
protected InstanceList testing;
private Iterator<Instance> testIterator;
private Iterator<Instance> trainIterator;
ACRFTrainer trainer = new DefaultAcrfTrainer ();
protected Pipe featurePipe;
protected Pipe tokPipe;
protected ACRFEvaluator evaluator = new DefaultAcrfTrainer.LogEvaluator ();
TokenizationFilter filter;
private Inferencer inferencer;
private Inferencer viterbiInferencer;
private int numCheckpointIterations = -1;
private File checkpointDirectory = null;
private boolean usePerTemplateTrain = false;
private int perTemplateIterations = 100;
private boolean cacheUnrolledGraphs;
// For data subsets
private Random r;
private double trainingPct = -1;
private double testingPct = -1;
// Using cascaded setter idiom
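  //
  // Example (illustrative sketch): each setter returns this trainer, so configuration is
  // typically written as one chained expression.  The templates, pipes, and iterators
  // named below are placeholders supplied by the caller, not defaults of this class.
  //
  //   ACRFExtractorTrainer trainer = new ACRFExtractorTrainer ()
  //       .setTemplates (tmpls)
  //       .setPipes (tokPipe, featurePipe)
  //       .setDataSource (trainIterator, testIterator)
  //       .setEvaluator (new DefaultAcrfTrainer.LogEvaluator ())
  //       .setNumIterations (100);
  //   ACRFExtractor extor = trainer.trainExtractor ();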
public ACRFExtractorTrainer setTemplates (ACRF.Template[] tmpls)
{
this.tmpls = tmpls;
return this;
}
public ACRFExtractorTrainer setDataSource (Iterator<Instance> trainIterator, Iterator<Instance> testIterator)
{
this.trainIterator = trainIterator;
this.testIterator = testIterator;
return this;
}
public ACRFExtractorTrainer setData (InstanceList training, InstanceList testing)
{
this.training = training;
this.testing = testing;
return this;
}
public ACRFExtractorTrainer setNumIterations (int numIter)
{
this.numIter = numIter;
return this;
}
public int getNumIter ()
{
return numIter;
}
public ACRFExtractorTrainer setPipes (Pipe tokPipe, Pipe featurePipe)
{
RememberTokenizationPipe rtp = new RememberTokenizationPipe ();
this.featurePipe = PipeUtils.concatenatePipes (rtp, featurePipe);
this.tokPipe = tokPipe;
return this;
}
public ACRFExtractorTrainer setEvaluator (ACRFEvaluator evaluator)
{
this.evaluator = evaluator;
return this;
}
public ACRFExtractorTrainer setTrainingMethod (ACRFTrainer acrfTrainer)
{
trainer = acrfTrainer;
return this;
}
public ACRFExtractorTrainer setTokenizatioFilter (TokenizationFilter filter)
{
this.filter = filter;
return this;
}
public ACRFExtractorTrainer setCacheUnrolledGraphs (boolean cacheUnrolledGraphs)
{
this.cacheUnrolledGraphs = cacheUnrolledGraphs;
return this;
}
public ACRFExtractorTrainer setNumCheckpointIterations (int numCheckpointIterations)
{
this.numCheckpointIterations = numCheckpointIterations;
return this;
}
public ACRFExtractorTrainer setCheckpointDirectory (File checkpointDirectory)
{
this.checkpointDirectory = checkpointDirectory;
return this;
}
public ACRFExtractorTrainer setUsePerTemplateTrain (boolean usePerTemplateTrain)
{
this.usePerTemplateTrain = usePerTemplateTrain;
return this;
}
public ACRFExtractorTrainer setPerTemplateIterations (int numIter)
{
this.perTemplateIterations = numIter;
return this;
}
public ACRFTrainer getTrainer ()
{
return trainer;
}
public TokenizationFilter getFilter ()
{
return filter;
}
// Main methods
public ACRFExtractor trainExtractor ()
{
ACRF acrf = (usePerTemplateTrain) ? perTemplateTrain() : trainAcrf ();
ACRFExtractor extor = new ACRFExtractor (acrf, tokPipe, featurePipe);
if (filter != null) extor.setTokenizationFilter (filter);
return extor;
}
private ACRF perTemplateTrain ()
{
Timing timing = new Timing ();
boolean hasConverged = false;
ACRF miniAcrf = null;
if (training == null) setupData ();
for (int ti = 0; ti < tmpls.length; ti++) {
ACRF.Template[] theseTmpls = new ACRF.Template[ti+1];
System.arraycopy (tmpls, 0, theseTmpls, 0, theseTmpls.length);
logger.info ("***PerTemplateTrain: Round "+ti+"\n Templates: "+
CollectionUtils.dumpToString (Arrays.asList (theseTmpls), " "));
miniAcrf = new ACRF (featurePipe, theseTmpls);
setupAcrf (miniAcrf);
ACRFEvaluator eval = setupEvaluator ("tmpl"+ti);
hasConverged = trainer.train (miniAcrf, training, null, testing, eval, perTemplateIterations);
timing.tick ("PerTemplateTrain round "+ti);
}
// finish by training to convergence
ACRFEvaluator eval = setupEvaluator ("full");
if (!hasConverged)
trainer.train (miniAcrf, training, null, testing, eval, numIter);
// the last acrf is the one to go with;
return miniAcrf;
}
/**
* Trains a new ACRF object with the given settings. Subclasses may override this method
* to implement alternative training procedures.
* @return a trained ACRF
*/
public ACRF trainAcrf ()
{
if (training == null) setupData ();
ACRF acrf = new ACRF (featurePipe, tmpls);
setupAcrf (acrf);
ACRFEvaluator eval = setupEvaluator ("");
trainer.train (acrf, training, null, testing, eval, numIter);
return acrf;
}
private void setupAcrf (ACRF acrf)
{
if (cacheUnrolledGraphs) acrf.setCacheUnrolledGraphs (true);
if (inferencer != null) acrf.setInferencer (inferencer);
if (viterbiInferencer != null) acrf.setViterbiInferencer (viterbiInferencer);
}
private ACRFEvaluator setupEvaluator (String checkpointPrefix)
{
ACRFEvaluator eval = evaluator;
if (numCheckpointIterations > 0) {
List evals = new ArrayList ();
evals.add (evaluator);
evals.add (new CheckpointingEvaluator (checkpointDirectory, numCheckpointIterations, tokPipe, featurePipe));
eval = new AcrfSerialEvaluator (evals);
}
return eval;
}
protected void setupData ()
{
Timing timing = new Timing ();
training = new InstanceList (featurePipe);
training.addThruPipe (new PipedIterator (trainIterator, tokPipe));
if (trainingPct > 0) training = subsetData (training, trainingPct);
if (testIterator != null) {
testing = new InstanceList (featurePipe);
testing.addThruPipe (new PipedIterator (testIterator, tokPipe));
      if (testingPct > 0) testing = subsetData (testing, testingPct);
}
timing.tick ("Data loading");
}
private InstanceList subsetData (InstanceList data, double pct)
{
InstanceList[] lsts = data.split (r, new double[] { pct, 1 - pct });
return lsts[0];
}
public InstanceList getTrainingData ()
{
if (training == null) setupData ();
return training;
}
public InstanceList getTestingData ()
{
if (testing == null) setupData ();
return testing;
}
public Extraction extractOnTestData (ACRFExtractor extor)
{
return extor.extract (testing);
}
public ACRFExtractorTrainer setInferencer (Inferencer inferencer)
{
this.inferencer = inferencer;
return this;
}
public ACRFExtractorTrainer setViterbiInferencer (Inferencer viterbiInferencer)
{
this.viterbiInferencer = viterbiInferencer;
return this;
}
public ACRFExtractorTrainer setDataSubsets (Random random, double trainingPct, double testingPct)
{
r = random;
this.trainingPct = trainingPct;
this.testingPct = testingPct;
return this;
}
// checkpointing
private static class CheckpointingEvaluator extends ACRFEvaluator {
private File directory;
private int interval;
private Pipe tokPipe;
private Pipe featurePipe;
public CheckpointingEvaluator (File directory, int interval, Pipe tokPipe, Pipe featurePipe)
{
this.directory = directory;
this.interval = interval;
this.tokPipe = tokPipe;
this.featurePipe = featurePipe;
}
public boolean evaluate (ACRF acrf, int iter, InstanceList training, InstanceList validation, InstanceList testing)
{
if (iter > 0 && iter % interval == 0) {
ACRFExtractor extor = new ACRFExtractor (acrf, tokPipe, featurePipe);
FileUtils.writeGzippedObject (new File (directory, "extor."+iter+".ser.gz"), extor);
}
return true;
}
public void test (InstanceList gold, List returned, String description) { }
}
}
| 9,562 | 27.978788 | 119 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/learning/extract/ACRFExtractor.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.learning.extract;
import java.util.Iterator;
import cc.mallet.extract.*;
import cc.mallet.grmm.learning.ACRF;
import cc.mallet.grmm.util.SliceLabelsSequence;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.iterator.PipeInputIterator;
import cc.mallet.types.*;
/**
* Created: Mar 1, 2005
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: ACRFExtractor.java,v 1.1 2007/10/22 21:38:02 mccallum Exp $
*/
public class ACRFExtractor implements Extractor {
private ACRF acrf;
private Pipe tokPipe;
private Pipe featurePipe;
private int slice = 0;
private String backgroundTag = "O";
private TokenizationFilter filter;
public ACRFExtractor (ACRF acrf, Pipe tokPipe, Pipe featurePipe)
{
this.acrf = acrf;
this.tokPipe = tokPipe;
this.featurePipe = featurePipe;
this.filter = new BIOTokenizationFilter ();
}
public Extraction extract (Object o)
{
throw new UnsupportedOperationException ("Not yet implemented");
}
public Extraction extract (Tokenization toks)
{
throw new UnsupportedOperationException ("Not yet implemented");
}
public Extraction extract (Iterator<Instance> source)
{
Extraction extraction = new Extraction (this, getTargetAlphabet ());
// Put all the instances through both pipes, then get viterbi path
InstanceList tokedList = new InstanceList (tokPipe);
tokedList.addThruPipe (source);
InstanceList pipedList = new InstanceList (getFeaturePipe ());
pipedList.addThruPipe (tokedList.iterator());
Iterator<Instance> it1 = tokedList.iterator ();
Iterator<Instance> it2 = pipedList.iterator ();
while (it1.hasNext()) {
Instance toked = it1.next();
Instance piped = it2.next();
Tokenization tok = (Tokenization) toked.getData();
String name = piped.getName().toString();
Sequence target = (Sequence) piped.getTarget ();
LabelsSequence output = acrf.getBestLabels (piped);
LabelSequence ls = SliceLabelsSequence.sliceLabelsSequence (output, slice);
LabelSequence lsTarget = SliceLabelsSequence.sliceLabelsSequence
((LabelsSequence) target, slice);
DocumentExtraction docseq = new DocumentExtraction (name, getTargetAlphabet (), tok,
ls, lsTarget, backgroundTag, filter);
extraction.addDocumentExtraction (docseq);
}
return extraction;
}
// Experimental: Extract from training lists
public Extraction extract (InstanceList testing)
{
Extraction extraction = new Extraction (this, getTargetAlphabet ());
for (int i = 0; i < testing.size(); i++) {
Instance instance = testing.get (i);
Tokenization tok = (Tokenization) instance.getProperty ("TOKENIZATION");
if (tok == null)
throw new IllegalArgumentException
("To use extract(InstanceList), must save the Tokenization!");
String name = instance.getName ().toString ();
Sequence target = (Sequence) instance.getTarget ();
Sequence output = acrf.getBestLabels (instance);
DocumentExtraction docseq = new DocumentExtraction (name, getTargetAlphabet (), tok,
output, target, backgroundTag, filter);
extraction.addDocumentExtraction (docseq);
}
return extraction;
}
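  // Example (illustrative sketch): extract(InstanceList) requires every instance to carry
  // its Tokenization in the "TOKENIZATION" property.  One way to arrange that -- the same
  // trick ACRFExtractorTrainer.setPipes uses -- is to prepend a RememberTokenizationPipe
  // to the feature pipe before building the list.  Here featurePipe and the iterator of
  // already-tokenized instances are placeholders supplied by the caller.
  //
  //   Pipe withTok = PipeUtils.concatenatePipes (new RememberTokenizationPipe (), featurePipe);
  //   InstanceList testing = new InstanceList (withTok);
  //   testing.addThruPipe (tokenizedInstances);   // Iterator<Instance> whose data is a Tokenization
  //   Extraction extraction = extractor.extract (testing);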
public Pipe getFeaturePipe ()
{
return featurePipe;
}
public Pipe getTokenizationPipe ()
{
return tokPipe;
}
public void setTokenizationPipe (Pipe pipe)
{
tokPipe = pipe;
}
public Alphabet getInputAlphabet ()
{
return acrf.getInputAlphabet ();
}
public LabelAlphabet getTargetAlphabet ()
{
return (LabelAlphabet) acrf.getInputPipe ().getTargetAlphabet ();
}
public ACRF getAcrf ()
{
return acrf;
}
public void setSlice (int sl) { slice = sl; }
public void setTokenizationFilter (TokenizationFilter filter)
{
this.filter = filter;
}
}
| 4,435 | 30.460993 | 97 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/GeneralUtils.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
/**
* Static utility methods that didn't seem to belong anywhere else.
*
* Created: Tue Mar 30 14:29:57 2004
*
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version $Id: GeneralUtils.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
*/
public class GeneralUtils {
private GeneralUtils () {} // No instances
public static String classShortName (Object obj)
{
String classname = obj.getClass().getName();
int dotidx = classname.lastIndexOf ('.');
String shortname = classname.substring (dotidx+1);
return shortname;
}
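  // Example (illustrative): classShortName (new java.util.ArrayList ()) returns "ArrayList".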
} // GeneralUtils
| 1,019 | 31.903226 | 76 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/SliceLabelsSequence.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.*;
/**
 * A pipe that reduces a LabelsSequence target with multiple label factors to the single LabelSequence given by slice <tt>k</tt>.
* Created: Fri Jan 02 23:27:04 2004
*
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version 1.0
*/
public class SliceLabelsSequence extends Pipe {
int slice;
public SliceLabelsSequence(int k) {
super (null, new LabelAlphabet ());
slice = k;
} // SliceLabelsSequence constructor
public Instance pipe (Instance carrier) {
LabelsSequence lbls = (LabelsSequence) carrier.getTarget ();
LabelAlphabet dict = (LabelAlphabet) getTargetAlphabet ();
if (dict == null) {
throw new IllegalArgumentException ("dict is null");
}
LabelSequence ls = sliceLabelsSequence (lbls, dict, slice);
carrier.setTarget (ls);
return carrier;
}
public static LabelSequence sliceLabelsSequence (LabelsSequence lbls, int slice)
{
return sliceLabelsSequence (lbls, lbls.getLabels (0).get (0).getLabelAlphabet (), slice);
}
public static LabelSequence sliceLabelsSequence (LabelsSequence lbls, LabelAlphabet dict, int slice)
{
Label[] labels = new Label [lbls.size()];
for (int t = 0; t < lbls.size(); t++) {
Label l = lbls.getLabels (t).get (slice);
labels [t] = dict.lookupLabel (l.getEntry ());
}
LabelSequence ls = new LabelSequence (labels);
return ls;
}
} // SliceLabelsSequence
| 1,793 | 28.9 | 102 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/CSIntInt2ObjectMultiMap.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import gnu.trove.THashMap;
import gnu.trove.TIntObjectHashMap;
import gnu.trove.TObjectProcedure;
import java.util.Iterator;
import java.util.List;
import java.util.ArrayList;
/**
* A map that maps (int, int) --> object, where each (int,int) key
* is allowed to map to multiple objects.
*
* Created: Dec 14, 2005
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: CSIntInt2ObjectMultiMap.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
*/
public class CSIntInt2ObjectMultiMap {
private TIntObjectHashMap backing = new TIntObjectHashMap ();
public void add (int key1, int key2, Object value)
{
TIntObjectHashMap inner = (TIntObjectHashMap) backing.get (key1);
if (inner == null) {
inner = new TIntObjectHashMap ();
backing.put (key1, inner);
}
List lst = (List) inner.get (key2);
if (lst == null) {
lst = new ArrayList ();
inner. put (key2, lst);
}
lst.add (value);
}
public List get (int key1, int key2)
{
TIntObjectHashMap inner = (TIntObjectHashMap) backing.get (key1);
if (inner == null) {
return null;
} else {
return (List) inner.get (key2);
}
}
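  // Example (illustrative sketch; factorA/factorB stand in for arbitrary caller objects):
  //
  //   CSIntInt2ObjectMultiMap map = new CSIntInt2ObjectMultiMap ();
  //   map.add (3, 7, factorA);
  //   map.add (3, 7, factorB);
  //   List both = map.get (3, 7);   // [factorA, factorB]
  //   List none = map.get (7, 3);   // null -- the key pair is ordered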
public int size ()
{
final int[] N = new int[]{0};
backing.forEachValue (new TObjectProcedure() {
public boolean execute (Object object)
{
TIntObjectHashMap inner = (TIntObjectHashMap) object;
N[0] += inner.size ();
return true;
}
});
return N[0];
}
public void clear () { backing.clear (); }
// not yet serializable
}
| 2,039 | 26.2 | 86 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/Matrices.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import java.util.Arrays;
import cc.mallet.types.Matrix;
import cc.mallet.types.MatrixOps;
import cc.mallet.types.Matrixn;
import cc.mallet.types.SparseMatrixn;
/**
* Static Matrix constructors.
* $Id: Matrices.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
*/
public class Matrices {
/* Returns a diagonal matrix of the given dimensions. It need not be square. */
public static Matrix diag (int[] sizes, double v)
{
int maxN = MatrixOps.max (sizes);
double[] vals = new double[maxN];
Arrays.fill (vals, v);
/* Compute indices of diagonals */
int[] idxs = new int [maxN];
for (int i = 0; i < idxs.length; i++) {
int[] oneIdx = new int [sizes.length];
Arrays.fill (oneIdx, i);
idxs[i] = Matrixn.singleIndex (sizes, oneIdx);
}
return new SparseMatrixn (sizes, idxs, vals);
}
  /* Returns a constant matrix of the given dimensions, with every entry equal to v. */
public static Matrix constant (int[] sizes, double v)
{
int singleSize = 1;
for (int i = 0; i < sizes.length; i++) {
singleSize *= sizes[i];
}
double[] vals = new double [singleSize];
Arrays.fill (vals, v);
return new SparseMatrixn (sizes, vals);
}
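  // Example (illustrative sketch): both constructors return sparse matrices.
  //
  //   Matrix eye  = Matrices.diag     (new int[] { 3, 3 }, 1.0);   // 3x3 identity
  //   Matrix ones = Matrices.constant (new int[] { 2, 4 }, 1.0);   // 2x4 matrix of ones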
}
| 1,662 | 27.672414 | 82 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/Models.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import java.util.*;
import cc.mallet.grmm.inference.Inferencer;
import cc.mallet.grmm.inference.JunctionTree;
import cc.mallet.grmm.inference.JunctionTreeInferencer;
import cc.mallet.grmm.types.*;
import gnu.trove.THashSet;
/**
* Static utilities that do useful things with factor graphs.
*
* Created: Sep 22, 2005
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: Models.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
*/
public class Models {
/**
* Returns a new factor graph, the same as a given one, except that all the nodes in
* the given Assignment are clamped as evidence.
* @param mdl Old model. Will not be modified.
* @param assn Evidence to add
* @return A new factor graph.
*/
public static FactorGraph addEvidence (FactorGraph mdl, Assignment assn)
{
return addEvidence (mdl, assn, null);
}
public static FactorGraph addEvidence (FactorGraph mdl, Assignment assn, Map toSlicedMap)
{
FactorGraph newMdl = new FactorGraph (mdl.numVariables ());
addSlicedPotentials (mdl, newMdl, assn, toSlicedMap);
return newMdl;
}
public static UndirectedModel addEvidence (UndirectedModel mdl, Assignment assn)
{
UndirectedModel newMdl = new UndirectedModel (mdl.numVariables ());
addSlicedPotentials (mdl, newMdl, assn, null);
return newMdl;
}
private static void addSlicedPotentials (FactorGraph fromMdl, FactorGraph toMdl, Assignment assn, Map toSlicedMap)
{
Set inputVars = new THashSet (Arrays.asList (assn.getVars ()));
Set remainingVars = new THashSet (fromMdl.variablesSet ());
remainingVars.removeAll (inputVars);
for (Iterator it = fromMdl.factorsIterator (); it.hasNext ();) {
Factor ptl = (Factor) it.next ();
Set theseVars = new THashSet (ptl.varSet ());
theseVars.retainAll (remainingVars);
Factor slicedPtl = ptl.slice (assn);
toMdl.addFactor (slicedPtl);
if (toSlicedMap != null) {
toSlicedMap.put (ptl, slicedPtl);
}
}
}
/**
* Returns the highest-score Assignment in a model according to a given inferencer.
* @param mdl Factor graph to use
* @param inf Inferencer to use. No need to call <tt>computeMarginals</tt> first.
* @return An Assignment
*/
public static Assignment bestAssignment (FactorGraph mdl, Inferencer inf)
{
inf.computeMarginals (mdl);
int[] outcomes = new int [mdl.numVariables ()];
for (int i = 0; i < outcomes.length; i++) {
Variable var = mdl.get (i);
int best = inf.lookupMarginal (var).argmax ();
outcomes[i] = best;
}
return new Assignment (mdl, outcomes);
}
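  // Example (illustrative sketch): clamp the observed variables of a model, then decode
  // the remaining ones.  Here mdl, observed, and hiddenVar are placeholders supplied by
  // the caller; any Inferencer could be substituted for the junction-tree one.
  //
  //   FactorGraph clamped = Models.addEvidence (mdl, observed);
  //   Assignment best = Models.bestAssignment (clamped, new JunctionTreeInferencer ());
  //   int value = best.get (hiddenVar);   // hiddenVar: any variable not clamped above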
/**
* Computes the exact entropy of a factor graph distribution using the junction tree algorithm.
* If the model is intractable, then this method won't return a number anytime soon.
*/
public static double entropy (FactorGraph mdl)
{
JunctionTreeInferencer inf = new JunctionTreeInferencer ();
inf.computeMarginals (mdl);
JunctionTree jt = inf.lookupJunctionTree ();
return jt.entropy ();
}
/**
* Computes the KL divergence <tt>KL(mdl1||mdl2)</tt>. Junction tree is used to compute the entropy.
* <p>
* TODO: This probably won't handle when the jnuction tree for MDL2 contains a clique that's not present in the
* junction tree for mdl1. If so, this is a bug.
*
* @param mdl1
* @param mdl2
* @return KL(mdl1||mdl2)
*/
public static double KL (FactorGraph mdl1, FactorGraph mdl2)
{
JunctionTreeInferencer inf1 = new JunctionTreeInferencer ();
inf1.computeMarginals (mdl1);
JunctionTree jt1 = inf1.lookupJunctionTree ();
JunctionTreeInferencer inf2 = new JunctionTreeInferencer ();
inf2.computeMarginals (mdl2);
JunctionTree jt2 = inf2.lookupJunctionTree ();
double entropy = jt1.entropy ();
double energy = 0;
for (Iterator it = jt2.clusterPotentials ().iterator(); it.hasNext();) {
Factor marg2 = (Factor) it.next ();
Factor marg1 = inf1.lookupMarginal (marg2.varSet ());
for (AssignmentIterator assnIt = marg2.assignmentIterator (); assnIt.hasNext();) {
energy += marg1.value (assnIt) * marg2.logValue (assnIt);
assnIt.advance();
}
}
for (Iterator it = jt2.sepsetPotentials ().iterator(); it.hasNext();) {
Factor marg2 = (Factor) it.next ();
Factor marg1 = inf1.lookupMarginal (marg2.varSet ());
for (AssignmentIterator assnIt = marg2.assignmentIterator (); assnIt.hasNext();) {
energy -= marg1.value (assnIt) * marg2.logValue (assnIt);
assnIt.advance();
}
}
return -entropy - energy;
}
public static void removeConstantFactors (FactorGraph sliced)
{
List factors = new ArrayList (sliced.factors ());
for (Iterator it = factors.iterator (); it.hasNext();) {
Factor factor = (Factor) it.next ();
if (factor instanceof ConstantFactor) {
sliced.divideBy (factor);
}
}
}
}
| 5,433 | 33.833333 | 116 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/THashMultiMap.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import gnu.trove.THashMap;
import java.util.*;
/**
* Version of THashMap where every key is mapped to a list of objects.
* <p>
* The put method adds a value to the list associated with a key, without
* removing any previous values.
* The get method returns the list of all objects associated with key.
* No effort is made to remove duplicates.
*
* Created: Dec 13, 2005
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: THashMultiMap.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
*/
public class THashMultiMap extends AbstractMap {
private THashMap backing;
public THashMultiMap ()
{
backing = new THashMap ();
}
public THashMultiMap (int initialCapacity)
{
backing = new THashMap (initialCapacity);
}
public Set entrySet ()
{
return backing.entrySet (); // potentially inefficient
}
/** Adds <tt>key</tt> as a key with an empty list as a value. */
public void add (Object key) { backing.put (key, new ArrayList ()); }
public Object get (Object o)
{
return (List) backing.get (o);
}
/** Adds <tt>value</tt> to the list of things mapped to by key.
* @return The current list of values associated with key.
* (N.B. This deviates from Map contract slightly! (Hopefully harmlessly))
*/
public Object put (Object key, Object value)
{
List lst;
if (!backing.keySet ().contains (key)) {
lst = new ArrayList ();
backing.put (key, lst);
} else {
lst = (List) backing.get (key);
}
lst.add (value);
return lst;
}
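  // Example (illustrative sketch; value1/value2 stand in for arbitrary caller objects):
  //
  //   THashMultiMap byWord = new THashMultiMap ();
  //   byWord.put ("bank", value1);
  //   byWord.put ("bank", value2);                    // does not replace value1
  //   List occurrences = (List) byWord.get ("bank");  // [value1, value2]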
// Serialization not yet supported
}
| 2,068 | 26.586667 | 83 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/LabelsAssignment.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import java.util.Map;
import java.util.List;
import java.util.ArrayList;
import cc.mallet.grmm.types.Assignment;
import cc.mallet.grmm.types.Variable;
import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.Labels;
import cc.mallet.types.LabelsSequence;
import gnu.trove.THashMap;
import gnu.trove.TIntArrayList;
/**
* A special kind of assignment for Variables that
* can be arranged in a LabelsSequence. This is an Adaptor
* to adapt LabelsSequences to Assignments.
* <p/>
* $Id: LabelsAssignment.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
*/
public class LabelsAssignment extends Assignment {
// these are just for printing; race conditions don't matter
private static int NEXT_ID = 0;
private int id = NEXT_ID++;
private Variable[][] idx2var;
private LabelsSequence lblseq;
private Map var2label;
public LabelsAssignment (LabelsSequence lbls)
{
super ();
this.lblseq = lbls;
setupLabel2Var ();
addRow (toVariableArray (), toValueArray ());
}
private Variable[] toVariableArray ()
{
List vars = new ArrayList (maxTime () * numSlices ());
for (int t = 0; t < idx2var.length; t++) {
for (int j = 0; j < idx2var[t].length; j++) {
vars.add (idx2var[t][j]);
}
}
return (Variable[]) vars.toArray (new Variable [vars.size ()]);
}
private int[] toValueArray ()
{
TIntArrayList vals = new TIntArrayList (maxTime () * numSlices ());
for (int t = 0; t < lblseq.size (); t++) {
Labels lbls = lblseq.getLabels (t);
for (int j = 0; j < lbls.size (); j++) {
Label lbl = lbls.get (j);
vals.add (lbl.getIndex ());
}
}
return vals.toNativeArray ();
}
private void setupLabel2Var ()
{
idx2var = new Variable [lblseq.size ()][];
var2label = new THashMap ();
for (int t = 0; t < lblseq.size (); t++) {
Labels lbls = lblseq.getLabels (t);
idx2var[t] = new Variable [lbls.size ()];
for (int j = 0; j < lbls.size (); j++) {
Label lbl = lbls.get (j);
Variable var = new Variable (lbl.getLabelAlphabet ());
var.setLabel ("I"+id+"_VAR[f=" + j + "][tm=" + t + "]");
idx2var[t][j] = var;
var2label.put (var, lbl);
}
}
}
public Variable varOfIndex (int t, int j)
{
return idx2var[t][j];
}
public Label labelOfVar (Variable var) { return (Label) var2label.get (var); }
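  // Example (illustrative sketch): clique templates use this mapping to recover the
  // Variable at a given (time step, label factor) position and its gold label.  Here
  // labelsSequence, t, and factor are placeholders supplied by the caller.
  //
  //   LabelsAssignment lblseq = new LabelsAssignment (labelsSequence);
  //   Variable v  = lblseq.varOfIndex (t, factor);   // label factor `factor' at time t
  //   Label gold  = lblseq.labelOfVar (v);           // the corresponding true Label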
public int maxTime () { return lblseq.size (); }
  // assumes that lblseq is not ragged
public int numSlices () { return idx2var[0].length; }
public LabelsSequence getLabelsSequence ()
{
return lblseq;
}
public LabelsSequence toLabelsSequence (Assignment assn)
{
int numFactors = numSlices ();
int maxTime = maxTime ();
Labels[] lbls = new Labels [maxTime];
for (int t = 0; t < maxTime; t++) {
Label[] theseLabels = new Label [numFactors];
for (int i = 0; i < numFactors; i++) {
Variable var = varOfIndex (t, i);
int maxidx;
if (var != null) {
maxidx = assn.get (var);
} else {
maxidx = 0;
}
LabelAlphabet dict = labelOfVar (var).getLabelAlphabet ();
theseLabels[i] = dict.lookupLabel (maxidx);
}
lbls[t] = new Labels (theseLabels);
}
return new LabelsSequence (lbls);
}
public LabelAlphabet getOutputAlphabet (int lvl)
{
return idx2var[0][lvl].getLabelAlphabet ();
}
}
| 3,906 | 26.907143 | 80 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/LabelsSequence2Assignment.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.Instance;
import cc.mallet.types.LabelsSequence;
/**
* $Id: LabelsSequence2Assignment.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
*/
public class LabelsSequence2Assignment extends Pipe {
public Instance pipe (Instance carrier)
{
LabelsSequence lbls = (LabelsSequence) carrier.getTarget ();
carrier.setTarget (new LabelsAssignment (lbls));
return carrier;
}
}
| 881 | 34.28 | 79 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/ModelReader.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.util.List;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.regex.Pattern;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import cc.mallet.grmm.types.*;
import gnu.trove.THashMap;
import bsh.Interpreter;
import bsh.EvalError;
/**
* $Id: ModelReader.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
*/
public class ModelReader {
private static THashMap allClasses;
static {
allClasses = new THashMap ();
// add new classes here
allClasses.put ("potts", PottsTableFactor.class);
allClasses.put ("unary", BoltzmannUnaryFactor.class);
allClasses.put ("binaryunary", BinaryUnaryFactor.class);
allClasses.put ("binarypair", BoltzmannPairFactor.class);
allClasses.put ("uniform", UniformFactor.class);
allClasses.put ("normal", UniNormalFactor.class);
allClasses.put ("beta", BetaFactor.class);
}
private THashMap name2var = new THashMap ();
public static Assignment readFromMatrix (VarSet vars, Reader in) throws IOException
{
Variable[] varr = vars.toVariableArray ();
Interpreter interpreter = new Interpreter ();
BufferedReader bIn = new BufferedReader (in);
Assignment assn = new Assignment ();
String line;
while ((line = bIn.readLine ()) != null) {
String[] fields = line.split ("\\s+");
Object[] vals = new Object [fields.length];
for (int i = 0; i < fields.length; i++) {
try {
vals[i] = interpreter.eval (fields[i]);
} catch (EvalError e) {
throw new RuntimeException ("Error reading line: "+line, e);
}
}
assn.addRow (varr, vals);
}
return assn;
}
public FactorGraph readModel (BufferedReader in) throws IOException
{
List factors = new ArrayList ();
String line;
while ((line = in.readLine ()) != null) {
try {
String[] fields = line.split ("\\s+");
if (fields[0].equalsIgnoreCase ("VAR")) {
// a variable declaration
handleVariableDecl (fields);
} else {
// a factor line
Factor factor = factorFromLine (fields);
factors.add (factor);
}
} catch (Exception e) {
throw new RuntimeException ("Error reading line:\n"+line, e);
}
}
FactorGraph fg = new FactorGraph ();
for (Iterator it = factors.iterator (); it.hasNext ();) {
Factor factor = (Factor) it.next ();
fg.multiplyBy (factor);
}
return fg;
}
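  // Example (hypothetical input format, inferred from the parsing code below; the line
  // shown with "factorName" is schematic, since the argument list after each factor name
  // must match a constructor of the registered factor class):
  //
  //   VAR x1 x2 : 2
  //   VAR theta : continuous
  //   x1 x2 ~ factorName theta 1.0
  //
  // A "VAR" line declares variables with the given number of outcomes (or "continuous").
  // Any other line lists variables before the '~' (created as binary if undeclared),
  // then a factor name registered in allClasses (potts, unary, binaryunary, binarypair,
  // uniform, normal, beta), then numeric constants and/or continuous-variable names.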
private void handleVariableDecl (String[] fields)
{
int colonIdx = findColon (fields);
if (fields.length != colonIdx + 2) throw new IllegalArgumentException ("Invalid syntax");
String numOutsString = fields[colonIdx+1];
int numOutcomes;
if (numOutsString.equalsIgnoreCase ("continuous")) {
numOutcomes = Variable.CONTINUOUS;
} else {
numOutcomes = Integer.parseInt (numOutsString);
}
    for (int i = 1; i < colonIdx; i++) {   // fields[0] is the "VAR" keyword itself, not a variable name
String name = fields[i];
Variable var = new Variable (numOutcomes);
var.setLabel (name);
name2var.put (name, var);
}
}
private int findColon (String[] fields)
{
for (int i = 0; i < fields.length; i++) {
if (fields[i].equals (":")) {
return i;
}
}
throw new IllegalArgumentException ("Invalid syntax.");
}
private Factor factorFromLine (String[] fields)
{
int idx = findTwiddle (fields);
return constructFactor (fields, idx);
}
private int findTwiddle (String[] fields)
{
for (int i = 0; i < fields.length; i++) {
if (fields[i].equals ("~")) {
return i;
}
}
return -1;
}
private Factor constructFactor (String[] fields, int idx)
{
Class factorClass = determineFactorClass (fields, idx);
Object[] args = determineFactorArgs (fields, idx);
Constructor factorCtor = findCtor (factorClass, args);
Factor factor;
try {
factor = (Factor) factorCtor.newInstance (args);
} catch (InstantiationException e) {
throw new RuntimeException (e);
} catch (IllegalAccessException e) {
throw new RuntimeException (e);
} catch (InvocationTargetException e) {
throw new RuntimeException (e);
}
return factor;
}
private Constructor findCtor (Class factorClass, Object[] args)
{
Class[] argClass = new Class[args.length];
for (int i = 0; i < args.length; i++) {
argClass[i] = args[i].getClass ();
// special case
if (argClass[i] == Double.class) { argClass[i] = double.class; }
}
try {
return factorClass.getDeclaredConstructor (argClass);
} catch (NoSuchMethodException e) {
throw new RuntimeException ("Invalid arguments for factor "+factorClass);
}
}
private Class determineFactorClass (String[] fields, int twiddleIdx)
{
String factorName = fields [twiddleIdx + 1].toLowerCase ();
Class theClass = (Class) allClasses.get (factorName);
if (theClass != null) {
return theClass;
} else {
throw new RuntimeException ("Could not determine factor class from "+factorName);
}
}
private Object[] determineFactorArgs (String[] fields, int twiddleIdx)
{
List args = new ArrayList (fields.length);
for (int i = 0; i < twiddleIdx; i++) {
args.add (varFromName (fields[i], true));
}
for (int i = twiddleIdx+2; i < fields.length; i++) {
args.add (varFromName (fields[i], false));
}
return args.toArray ();
}
private static Pattern nbrRegex = Pattern.compile ("[+-]?(\\d+(\\.[\\d]*)|\\.\\d+)");
private Object varFromName (String name, boolean preTwiddle)
{
if (nbrRegex.matcher(name).matches ()) {
return new Double (Double.parseDouble (name));
} else if (name2var.contains (name)) {
return name2var.get (name);
} else {
Variable var = (preTwiddle) ? new Variable (2) : new Variable (Variable.CONTINUOUS);
var.setLabel (name);
name2var.put (name, var);
return var;
}
}
}
| 6,553 | 28.128889 | 93 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/RememberTokenizationPipe.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import cc.mallet.extract.Tokenization;
import cc.mallet.pipe.Pipe;
import cc.mallet.types.Instance;
/**
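 * A pipe that stores each instance's Tokenization (its data object) under the
 * instance property "TOKENIZATION", so that later stages can recover it.
 *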
* Created: Mar 17, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: RememberTokenizationPipe.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
*/
public class RememberTokenizationPipe extends Pipe {
public Instance pipe (Instance carrier)
{
Tokenization tok = (Tokenization) carrier.getData ();
carrier.setProperty ("TOKENIZATION", tok);
return carrier;
}
}
| 978 | 33.964286 | 87 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/Flops.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
/**
* Utilities for flop (floating-point operation) counting. This is a much better
* way to measure computation than CPU time, because it avoids measuring non-essential
* properties of the implementations.
*
* $Id: Flops.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
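 *
 * <p>Usage sketch (illustrative only; the operation counts are made up):
 * <pre>
 *   Flops.Watch watch = new Flops.Watch ();  // snapshot the global flop count
 *   Flops.log (100);                         // charge 100 logarithms
 *   Flops.exp (100);                         // charge 100 exponentials
 *   watch.tick ("transcendentals");          // print and return flops since last tick
 *   long total = Flops.getFlops ();          // global count accumulated so far
 * </pre>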
*/
public class Flops {
private static long flops = 0;
// These figures are taken from Tom Minka's lightspeed toolbox.
private static final int EXP_FLOPS = 40;
private static final int LOG_FLOPS = 20;
private static final int DIV_FLOPS = 8;
private static final int SQRT_FLOPS = 8;
public static long getFlops ()
{
return flops;
}
public static void exp ()
{
flops += EXP_FLOPS;
}
public static void log ()
{
flops += LOG_FLOPS;
}
public static void div ()
{
flops += DIV_FLOPS;
}
public static void sqrt ()
{
flops += SQRT_FLOPS;
}
public static void sumLogProb (int n)
{
flops += n * (LOG_FLOPS + EXP_FLOPS);
}
public static void increment (int N)
{
flops += N;
}
public static void log (int N)
{
flops += N * LOG_FLOPS;
}
public static void exp (int N)
{
flops += N * EXP_FLOPS;
}
public static void pow (int N)
{
// Get an upper bound using
// a^b = exp(b*log(a))
Flops.increment (N * (EXP_FLOPS + LOG_FLOPS + 1));
}
public static class Watch {
private long starting;
private long current;
public Watch ()
{
starting = flops;
current = starting;
}
public long tick ()
{
return tick (null);
}
public long tick (String message)
{
long elapsed = flops - current;
current = flops;
if (message != null) System.err.println (message+" flops = "+elapsed);
return elapsed;
}
public long totalFlopsElapsed () { return flops - starting; }
}
}
| 2,282 | 20.742857 | 87 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/Graphs.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import org._3pq.jgrapht.UndirectedGraph;
import org._3pq.jgrapht.graph.SimpleGraph;
import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.FactorGraph;
import cc.mallet.grmm.types.VarSet;
import cc.mallet.grmm.types.Variable;
import java.util.Iterator;
/**
* Created: Dec 21, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: Graphs.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
*/
public class Graphs {
/**
* Converts a FactorGraph into a plain graph where each Variable is a vertex,
* and two Variables are connected by an edge if they are arguments to the same factor.
 * (Essentially converts a factor graph into an MRF structure, minus the factors.)
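 *
 * <p>Usage sketch (mirrors the accompanying unit tests; <tt>mdl</tt> stands for any
 * existing FactorGraph):
 * <pre>
 *   UndirectedGraph g = Graphs.mdlToGraph (mdl);
 *   Set vertices = g.vertexSet ();  // one vertex per Variable in the model
 *   Set edges = g.edgeSet ();       // one edge per pair of Variables sharing a factor
 * </pre>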
* @param fg
* @return a Graph
*/
public static UndirectedGraph mdlToGraph (FactorGraph fg)
{
UndirectedGraph g = new SimpleGraph ();
for (Iterator it = fg.variablesIterator (); it.hasNext ();) {
Variable var = (Variable) it.next ();
g.addVertex (var);
}
for (Iterator it = fg.factorsIterator (); it.hasNext ();) {
Factor factor = (Factor) it.next ();
VarSet varSet = factor.varSet ();
int nv = varSet.size ();
for (int i = 0; i < nv; i++) {
for (int j = i + 1; j < nv; j++) {
g.addEdge (varSet.get (i), varSet.get (j));
}
}
}
return g;
}
}
| 1,821 | 30.413793 | 90 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/PipedIterator.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.iterator.PipeInputIterator;
import cc.mallet.types.Instance;
import java.util.Iterator;
/**
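 * An iterator over instances that passes each element of an underlying iterator
 * through a Pipe before returning it.
 *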
* Created: Mar 3, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: PipedIterator.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
*/
@Deprecated // With the new Pipes able to act as iterators themselves, this should no longer be necessary.
public class PipedIterator implements Iterator<Instance> {
Iterator<Instance> subIt;
Pipe pipe;
public PipedIterator (Iterator<Instance> subIt, Pipe pipe)
{
this.subIt = subIt;
this.pipe = pipe;
}
// The Iterator<Instance> interface
public Instance next ()
{
Instance inst = subIt.next ();
inst = pipe.pipe (inst);
return new Instance (inst.getData (), inst.getTarget (), inst.getName (), inst.getSource ());
}
public boolean hasNext ()
{
return subIt.hasNext ();
}
public void remove () { throw new IllegalStateException ("This Iterator<Instance> does not implement remove()."); }
}
| 1,520 | 31.361702 | 117 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/util/MIntInt2ObjectMap.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.util;
import gnu.trove.TIntObjectHashMap;
import gnu.trove.TObjectProcedure;
import gnu.trove.TIntObjectIterator;
import java.io.Serializable;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
/**
* Hash map that maps integer pairs to objects.
 * This uses much less space than a 2D array if the mapping is sparse.
*
* Created: Dec 14, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: MIntInt2ObjectMap.java,v 1.1 2007/10/22 21:37:58 mccallum Exp $
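 *
 * <p>Usage sketch (key and value choices are illustrative):
 * <pre>
 *   MIntInt2ObjectMap map = new MIntInt2ObjectMap ();
 *   map.put (0, 2, "A");               // store "A" under the key pair (0, 2)
 *   Object a = map.get (0, 2);         // returns "A"
 *   Object missing = map.get (7, 7);   // returns null for absent pairs
 *   int n = map.size ();               // total number of stored (key1, key2) pairs
 * </pre>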
*/
public class MIntInt2ObjectMap implements Serializable {
private TIntObjectHashMap backing = new TIntObjectHashMap ();
public MIntInt2ObjectMap () { }
public MIntInt2ObjectMap (int initialCapacity) {
backing = new TIntObjectHashMap (initialCapacity);
}
public Object put (int key1, int key2, Object value)
{
TIntObjectHashMap inner;
if (backing.containsKey (key1)) {
inner = (TIntObjectHashMap) backing.get (key1);
} else {
inner = new TIntObjectHashMap ();
backing.put (key1, inner);
}
return inner.put (key2, value);
}
public Object get (int key1, int key2)
{
TIntObjectHashMap inner = (TIntObjectHashMap) backing.get (key1);
if (inner == null) {
return null;
} else {
return inner.get (key2);
}
}
/** Returns an iterator over the set of (key2, value) pairs that match (key1). */
public TIntObjectIterator curry (int key1)
{
final TIntObjectHashMap inner = (TIntObjectHashMap) backing.get (key1);
if (inner == null) {
return new TIntObjectIterator (new TIntObjectHashMap ());
} else {
return new TIntObjectIterator (inner);
}
}
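// Illustrative iteration over the view returned by curry() (assumes trove's standard
// TIntObjectIterator protocol of hasNext()/advance()/key()/value()):
//
//   TIntObjectIterator it = map.curry (key1);
//   while (it.hasNext ()) {
//     it.advance ();
//     int key2 = it.key ();
//     Object value = it.value ();
//   }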
/** Returns an array of first-level keys. */
public int[] keys1 () {
return backing.keys ();
}
public int size ()
{
final int[] N = new int[]{0};
backing.forEachValue (new TObjectProcedure() {
public boolean execute (Object object)
{
TIntObjectHashMap inner = (TIntObjectHashMap) object;
N[0] += inner.size ();
return true;
}
});
return N[0];
}
public int[] keys2 (int key1)
{
TIntObjectHashMap inner = (TIntObjectHashMap) backing.get (key1);
return inner.keys ();
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException
{
out.writeInt (CURRENT_SERIAL_VERSION);
int[] keys1 = keys1 ();
out.writeInt (keys1.length);
for (int i = 0; i < keys1.length; i++) {
int k1 = keys1[i];
out.writeInt (k1);
int[] keys2 = keys2 (k1);
out.writeInt (keys2.length);
for (int j = 0; j < keys2.length; j++) {
int k2 = keys2[j];
out.writeInt (k2);
out.writeObject (get (k1, k2));
}
}
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.readInt (); // read and discard the serialization version number
int N1 = in.readInt ();
backing = new TIntObjectHashMap (N1);
for (int i = 0; i < N1; i++) {
int k1 = in.readInt ();
int N2 = in.readInt ();
for (int j = 0; j < N2; j++) {
int k2 = in.readInt ();
Object value = in.readObject ();
put (k1, k2, value);
}
}
}
}
| 3,788 | 25.87234 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestDirectedModel.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import cc.mallet.grmm.inference.BruteForceInferencer;
import cc.mallet.grmm.types.*;
import junit.framework.*;
/**
* Created: Mar 28, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestDirectedModel.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestDirectedModel extends TestCase {
private CPT pA;
private CPT pB;
private CPT pC;
private DiscreteFactor fA;
private DiscreteFactor fB;
private DiscreteFactor fC;
private Variable[] vars;
private Variable A;
private Variable B;
private Variable C;
public TestDirectedModel (String name)
{
super (name);
A = new Variable (2);
B = new Variable (2);
C = new Variable (2);
vars = new Variable[] { A, B, C };
fA = LogTableFactor.makeFromValues (A, new double[] { 1, 4 });
fB = LogTableFactor.makeFromValues (B, new double[] { 3, 2 });
double[] vals = new double[] { 3, 7, 5, 5, 9, 1, 6, 4, };
fC = new TableFactor (vars, vals);
pA = Factors.normalizeAsCpt ((AbstractTableFactor) fA.duplicate (), A);
pB = Factors.normalizeAsCpt ((AbstractTableFactor) fB.duplicate (), B);
pC = Factors.normalizeAsCpt ((AbstractTableFactor) fC.duplicate (), C);
}
public void testSimpleModel ()
{
FactorGraph fg1 = new FactorGraph (vars);
fg1.addFactor (pA);
fg1.addFactor (pB);
fg1.addFactor (fC);
DirectedModel dm = new DirectedModel (vars);
dm.addFactor (pA);
dm.addFactor (pB);
dm.addFactor (pC);
BruteForceInferencer inf = new BruteForceInferencer ();
DiscreteFactor joint1 = (DiscreteFactor) inf.joint (fg1);
DiscreteFactor joint2 = (DiscreteFactor) inf.joint (dm);
comparePotentials (joint1, joint2);
}
private void comparePotentials (DiscreteFactor fActual, DiscreteFactor fExpected)
{
double[] actual = fActual.toValueArray ();
double[] expected = fExpected.toValueArray ();
assertEquals (expected.length, actual.length);
for (int i = 0; i < expected.length; i++) {
assertEquals (expected[i], actual[i], 0.001);
}
}
public void testCycleChecking ()
{
DirectedModel dm = new DirectedModel (vars);
dm.addFactor (pA);
dm.addFactor (pB);
dm.addFactor (pC);
try {
TableFactor f1 = new TableFactor (new Variable[] { B, C });
dm.addFactor (new CPT (f1, B));
assertTrue ("Test failed: No exception thrown.", false);
} catch (IllegalArgumentException e) {
// Exception is expected
}
try {
TableFactor f1 = new TableFactor (new Variable[] { A, C });
dm.addFactor (new CPT (f1, A));
assertTrue ("Test failed: No exception thrown.", false);
} catch (IllegalArgumentException e) {
// Exception is expected
}
}
public void testCptOfVar ()
{
DirectedModel dm = new DirectedModel (vars);
dm.addFactor (pA);
dm.addFactor (pB);
dm.addFactor (pC);
assertTrue (pA == dm.getCptofVar (A));
assertTrue (pB == dm.getCptofVar (B));
assertTrue (pC == dm.getCptofVar (C));
}
public void testFactorReplace ()
{
DirectedModel dm = new DirectedModel (vars);
dm.addFactor (pA);
dm.addFactor (pB);
dm.addFactor (pC);
assertEquals (3, dm.factors ().size ());
TableFactor f1 = new TableFactor (new Variable[] { B, C });
CPT p1 = new CPT (f1, C);
try {
dm.addFactor (p1);
} catch (IllegalArgumentException e) {
// expected
}
}
public static Test suite ()
{
return new TestSuite (TestDirectedModel.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestDirectedModel (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 4,371 | 27.763158 | 83 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestGenericAcrfData2TokenSequence.java | package cc.mallet.grmm.test;
import junit.framework.TestCase;
import junit.framework.Test;
import junit.framework.TestSuite;
import java.io.IOException;
import java.io.StringReader;
import java.util.regex.Pattern;
import cc.mallet.extract.StringTokenization;
import cc.mallet.grmm.learning.GenericAcrfData2TokenSequence;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.iterator.LineGroupIterator;
import cc.mallet.types.*;
import cc.mallet.types.tests.TestSerializable;
/**
* Created: Sep 15, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestGenericAcrfData2TokenSequence.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestGenericAcrfData2TokenSequence extends TestCase {
String sampleData = "LBLA LBLC ---- f1 f5 f7\n" +
"LBLB LBLC ---- f5 f6\n" +
"LBLB LBLD ----\n" +
"LBLA LBLD ---- f2 f1\n";
String sampleData2 = "LBLB LBLD ---- f1 f5 f7\n" +
"LBLA LBLC ---- f5 f6\n" +
"LBLA LBLC ----\n" +
"LBLB LBLD ---- f2 f1\n";
String sampleFixedData = "LBLA LBLC f1 f5 f7\n" +
"LBLB LBLC f5 f6\n" +
"LBLB LBLD\n" +
"LBLA LBLD f2 f1\n";
String sampleFixedData2 = "LBLB LBLD f1 f5 f7\n" +
"LBLA LBLC f5 f6\n" +
"LBLA LBLC\n" +
"LBLB LBLD f2 f1\n";
String labelsAtEndData = "f1 f5 f7 LBLB LBLD\n" +
"f5 f6 LBLA LBLC\n" +
"LBLA LBLC\n" +
"f2 f1 LBLB LBLD\n";
public TestGenericAcrfData2TokenSequence (String name)
{
super (name);
}
public void testFromSerialization () throws IOException, ClassNotFoundException
{
Pipe p = new GenericAcrfData2TokenSequence ();
InstanceList training = new InstanceList (p);
training.addThruPipe (new LineGroupIterator (new StringReader (sampleData), Pattern.compile ("^$"), true));
Pipe p2 = (Pipe) TestSerializable.cloneViaSerialization (p);
InstanceList l1 = new InstanceList (p);
l1.addThruPipe (new LineGroupIterator (new StringReader (sampleData2), Pattern.compile ("^$"), true));
InstanceList l2 = new InstanceList (p2);
l2.addThruPipe (new LineGroupIterator (new StringReader (sampleData2), Pattern.compile ("^$"), true));
// the readResolve alphabet thing doesn't kick in on first deserialization
assertTrue (p.getTargetAlphabet () != p2.getTargetAlphabet ());
assertEquals (1, l1.size ());
assertEquals (1, l2.size ());
Instance inst1 = l1.get (0);
Instance inst2 = l2.get (0);
LabelsSequence ls1 = (LabelsSequence) inst1.getTarget ();
LabelsSequence ls2 = (LabelsSequence) inst2.getTarget ();
assertEquals (4, ls1.size ());
assertEquals (4, ls2.size ());
for (int i = 0; i < 4; i++) {
assertEquals (ls1.get (i).toString (), ls2.get (i).toString ());
}
}
public void testFixedNumLabels () throws IOException, ClassNotFoundException
{
Pipe p = new GenericAcrfData2TokenSequence (2);
InstanceList training = new InstanceList (p);
training.addThruPipe (new LineGroupIterator (new StringReader (sampleFixedData), Pattern.compile ("^$"), true));
assertEquals (1, training.size ());
Instance inst1 = training.get (0);
LabelsSequence ls1 = (LabelsSequence) inst1.getTarget ();
assertEquals (4, ls1.size ());
}
public void testLabelsAtEnd () throws IOException, ClassNotFoundException
{
GenericAcrfData2TokenSequence p = new GenericAcrfData2TokenSequence (2);
p.setLabelsAtEnd (true);
InstanceList training = new InstanceList (p);
training.addThruPipe (new LineGroupIterator (new StringReader (labelsAtEndData), Pattern.compile ("^$"), true));
assertEquals (1, training.size ());
Instance inst1 = training.get (0);
StringTokenization toks = (StringTokenization) inst1.getData ();
LabelsSequence ls1 = (LabelsSequence) inst1.getTarget ();
assertEquals (4, ls1.size ());
assertEquals (3, toks.get(0).getFeatures ().size ());
assertEquals ("LBLB LBLD", ls1.getLabels (0).toString ());
LabelAlphabet globalDict = p.getLabelAlphabet (0);
assertEquals (2, p.numLevels ());
assertEquals (globalDict, ls1.getLabels (0).get (0).getLabelAlphabet ());
}
public void testNoTokenText ()
{
GenericAcrfData2TokenSequence p = new GenericAcrfData2TokenSequence (2);
p.setFeaturesIncludeToken(false);
p.setIncludeTokenText(false);
InstanceList training = new InstanceList (p);
training.addThruPipe (new LineGroupIterator (new StringReader (sampleFixedData), Pattern.compile ("^$"), true));
assertEquals (1, training.size ());
Instance inst1 = training.get (0);
LabelsSequence ls1 = (LabelsSequence) inst1.getTarget ();
assertEquals (4, ls1.size ());
TokenSequence ts1 = (TokenSequence) inst1.getData ();
assertEquals (3, ts1.get(0).getFeatures().size ());
assertEquals (2, ts1.get(1).getFeatures().size ());
}
public static Test suite ()
{
return new TestSuite (TestGenericAcrfData2TokenSequence.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestGenericAcrfData2TokenSequence (args[i]));
}
} else {
theSuite = (TestSuite) TestGenericAcrfData2TokenSequence.suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 5,470 | 31.182353 | 116 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestAssignment.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://mallet.cs.umass.edu/
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.TestCase;
import junit.framework.Test;
import junit.framework.TestSuite;
import java.io.IOException;
import cc.mallet.grmm.types.*;
import cc.mallet.types.tests.TestSerializable;
/**
* Created: Aug 11, 2004
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestAssignment.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $
*/
public class TestAssignment extends TestCase {
private Variable[] vars;
/**
* Constructs a test case with the given name.
*/
public TestAssignment (String name)
{
super (name);
}
protected void setUp () throws Exception
{
vars = new Variable[] {
new Variable (2),
new Variable (2),
};
}
public void testSimple ()
{
Assignment assn = new Assignment (vars, new int[] { 1, 0 });
assertEquals (1, assn.get (vars [0]));
assertEquals (0, assn.get (vars [1]));
assertEquals (new Integer (0), assn.getObject (vars[1]));
}
public void testScale ()
{
Assignment assn = new Assignment (vars, new int[] { 1, 0 });
assn.addRow (vars, new int[] { 1, 0 });
assn.addRow (vars, new int[] { 1, 1 });
Assignment assn2 = new Assignment (vars, new int[] { 1, 0 });
assn.normalize ();
assertEquals (0.666666, assn.value (assn2), 1e-5);
}
public void testScaleMarginalize ()
{
Assignment assn = new Assignment (vars, new int[] { 1, 0 });
assn.addRow (vars, new int[] { 1, 0 });
assn.addRow (vars, new int[] { 1, 1 });
assn.normalize ();
Factor mrg = assn.marginalize (vars[1]);
Assignment assn2 = new Assignment (vars[1], 0);
assertEquals (0.666666, mrg.value (assn2), 1e-5);
}
public void testSerialization () throws IOException, ClassNotFoundException
{
Assignment assn = new Assignment (vars, new int[] { 1, 0 });
Assignment assn2 = (Assignment) TestSerializable.cloneViaSerialization (assn);
assertEquals (2, assn2.numVariables ());
assertEquals (1, assn2.numRows ());
assertEquals (1, assn.get (vars [0]));
assertEquals (0, assn.get (vars [1]));
}
public void testMarginalize ()
{
Assignment assn = new Assignment ();
assn.addRow (vars, new int[] { 1, 1 });
assn.addRow (vars, new int[] { 1, 0 });
Assignment assn2 = (Assignment) assn.marginalize (vars[0]);
assertEquals (2, assn2.numRows ());
assertEquals (1, assn2.size ());
assertEquals (vars[0], assn2.getVariable (0));
assertEquals (1, assn.get (0, vars[0]));
assertEquals (1, assn.get (1, vars[0]));
}
public void testMarginalizeOut ()
{
Assignment assn = new Assignment ();
assn.addRow (vars, new int[] { 1, 1 });
assn.addRow (vars, new int[] { 1, 0 });
Assignment assn2 = (Assignment) assn.marginalizeOut (vars[1]);
assertEquals (2, assn2.numRows ());
assertEquals (1, assn2.size ());
assertEquals (vars[0], assn2.getVariable (0));
assertEquals (1, assn.get (0, vars[0]));
assertEquals (1, assn.get (1, vars[0]));
}
public void testUnion ()
{
Assignment assn1 = new Assignment ();
assn1.addRow (new Variable[] { vars[0] }, new int[] { 1 });
Assignment assn2 = new Assignment ();
assn2.addRow (new Variable[] { vars[1] }, new int[] { 0 });
Assignment assn3 = Assignment.union (assn1, assn2);
assertEquals (1, assn3.numRows ());
assertEquals (2, assn3.numVariables ());
assertEquals (1, assn3.get (0, vars[0]));
assertEquals (0, assn3.get (0, vars[1]));
}
public void testMultiRow ()
{
Assignment assn = new Assignment ();
assn.addRow (vars, new int[] { 1, 1 });
assn.addRow (vars, new int[] { 1, 0 });
assertEquals (2, assn.numRows ());
assertEquals (1, assn.get (0, vars[1]));
assertEquals (0, assn.get (1, vars[1]));
try {
assn.get (vars[1]);
fail ();
} catch (IllegalArgumentException e) {}
}
public void testSetRow ()
{
Assignment assn = new Assignment ();
assn.addRow (vars, new int[] { 1, 1 });
assn.addRow (vars, new int[] { 1, 0 });
assertEquals (1, assn.get (0, vars[0]));
assn.setRow (0, new int[] { 0, 0 });
assertEquals (2, assn.numRows ());
assertEquals (0, assn.get (0, vars[0]));
assertEquals (0, assn.get (0, vars[1]));
assertEquals (1, assn.get (1, vars[0]));
assertEquals (0, assn.get (1, vars[1]));
}
public void testSetRowFromAssn ()
{
Assignment assn = new Assignment ();
assn.addRow (vars, new int[] { 1, 1 });
assn.addRow (vars, new int[] { 1, 0 });
assertEquals (1, assn.get (0, vars[0]));
Assignment assn2 = new Assignment ();
assn2.addRow (vars, new int[] { 0, 0 });
assn.setRow (0, assn2);
assertEquals (2, assn.numRows ());
assertEquals (0, assn.get (0, vars[0]));
assertEquals (0, assn.get (0, vars[1]));
assertEquals (1, assn.get (1, vars[0]));
assertEquals (0, assn.get (1, vars[1]));
}
public void testSetValue ()
{
Assignment assn = new Assignment ();
assn.addRow (vars, new int[] { 1, 1 });
assn.setValue (vars[0], 0);
assertEquals (1, assn.numRows ());
assertEquals (0, assn.get (0, vars[0]));
assertEquals (1, assn.get (0, vars[1]));
}
public void testSetValueDup ()
{
Assignment assn = new Assignment ();
assn.addRow (vars, new int[] { 1, 1 });
Assignment dup = (Assignment) assn.duplicate ();
dup.setValue (vars[0], 0);
assertEquals (1, dup.numRows ());
assertEquals (0, dup.get (0, vars[0]));
assertEquals (1, dup.get (0, vars[1]));
}
public void testSetValueExpand ()
{
Assignment assn = new Assignment ();
assn.addRow (vars, new int[] { 0, 0 });
Variable v3 = new Variable (2);
assn.setValue (v3, 1);
assertEquals (3, assn.size ());
assertEquals (0, assn.get (vars[0]));
assertEquals (0, assn.get (vars[1]));
assertEquals (1, assn.get (v3));
}
public void testAsTable ()
{
Assignment assn = new Assignment ();
assn.addRow (vars, new int[] { 1, 1 });
assn.addRow (vars, new int[] { 1, 0 });
assn.addRow (vars, new int[] { 1, 0 });
AbstractTableFactor tbl = assn.asTable ();
TableFactor exp = new TableFactor (vars, new double[] { 0, 0, 2, 1 });
assertTrue (exp.almostEquals (tbl));
}
public void testAddRowMixed ()
{
Assignment assn = new Assignment ();
assn.addRow (vars, new int[] { 1, 1 });
assn.addRow (vars, new int[] { 1, 0 });
Assignment assn2 = new Assignment ();
assn2.addRow (new Variable[] { vars[1], vars[0] }, new int[] { 0, 1 });
assn.addRow (assn2);
AbstractTableFactor tbl = assn.asTable ();
TableFactor exp = new TableFactor (vars, new double[] { 0, 0, 2, 1 });
assertTrue (exp.almostEquals (tbl));
}
public static Test suite()
{
return new TestSuite (TestAssignment.class);
}
public static void main(String[] args) throws Exception
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite();
for (int i = 0; i < args.length; i++) {
theSuite.addTest(new TestAssignment (args[i]));
}
} else {
theSuite = (TestSuite) TestAssignment.suite ();
}
junit.textui.TestRunner.run(theSuite);
}
}
| 7,645 | 28.295019 | 82 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestPottsFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import cc.mallet.grmm.types.*;
import junit.framework.*;
/**
* $Id: TestPottsFactor.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestPottsFactor extends TestCase {
private PottsTableFactor factor;
private Variable alpha;
private VarSet vars;
public TestPottsFactor (String name)
{
super (name);
}
/**
* @return a <code>TestSuite</code>
*/
public static TestSuite suite ()
{
return new TestSuite (TestPottsFactor.class);
}
protected void setUp () throws Exception
{
alpha = new Variable (Variable.CONTINUOUS);
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
vars = new HashVarSet (new Variable[] { v1,v2 });
factor = new PottsTableFactor (vars, alpha);
}
public void testSlice ()
{
Assignment assn = new Assignment (alpha, 1.0);
Factor sliced = factor.slice (assn);
assertTrue (sliced instanceof AbstractTableFactor);
assertTrue (sliced.varSet ().equals (vars));
TableFactor expected = new TableFactor (vars, new double[] { 1.0, Math.exp(-1), Math.exp(-1), 1.0 });
assertTrue (sliced.almostEquals (expected));
}
public void testSumGradLog ()
{
Assignment alphaAssn = new Assignment (alpha, 1.0);
double[] values = new double[] { 0.4, 0.1, 0.3, 0.2 };
Factor q = new TableFactor (vars, values);
double grad = factor.sumGradLog (q, alpha, alphaAssn);
assertEquals (-0.4, grad, 1e-5);
}
public void testSumGradLog2 ()
{
Assignment alphaAssn = new Assignment (alpha, 1.0);
double[] values = new double[] { 0.4, 0.1, 0.3, 0.2 };
Factor q1 = new TableFactor (vars, values);
Factor q2 = new TableFactor (new Variable(2), new double[] { 0.7, 0.3 });
Factor q = q1.multiply (q2);
double grad = factor.sumGradLog (q, alpha, alphaAssn);
assertEquals (-0.4, grad, 1e-5);
}
public static void main (String[] args)
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestPottsFactor (args[i]));
}
} else {
theSuite = suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,653 | 25.808081 | 105 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestHashClique.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.*;
import java.util.Arrays;
import cc.mallet.grmm.types.HashVarSet;
import cc.mallet.grmm.types.Variable;
/**
* Created: Aug 22, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestHashClique.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestHashClique extends TestCase {
public TestHashClique (String name)
{
super (name);
}
public void testEqualsHashCode ()
{
Variable[] vars = new Variable [4];
for (int i = 0; i < vars.length; i++) {
vars[i] = new Variable(3);
}
HashVarSet c1 = new HashVarSet (vars);
HashVarSet c2 = new HashVarSet (vars);
assertTrue(c1.equals (c2));
assertTrue(c2.equals (c1));
assertEquals (c1.hashCode(), c2.hashCode ());
}
public void testAddAllOrdering ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] vars = new Variable[] { new Variable(2), new Variable (2) };
HashVarSet vs = new HashVarSet (vars);
checkOrdering (vs, vars);
}
}
public void testAddAllOrdering2 ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] vars = new Variable[] { new Variable(2), new Variable (2) };
HashVarSet vs = new HashVarSet ();
vs.addAll (Arrays.asList (vars));
checkOrdering (vs, vars);
}
}
public void testAddAllOrdering3 ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] vars = new Variable[] { new Variable(2), new Variable (2) };
HashVarSet vsOld = new HashVarSet (vars);
HashVarSet vs = new HashVarSet (vsOld);
checkOrdering (vs, vars);
}
}
private void checkOrdering (HashVarSet vs, Variable[] vars)
{
assertEquals (vars.length, vs.size ());
for (int i = 0; i < vars.length; i++) {
assertEquals (vars[i], vs.get (i));
}
}
public static Test suite ()
{
return new TestSuite (TestHashClique.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestHashClique (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,750 | 25.970588 | 77 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestUniformFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.*;
import gnu.trove.TDoubleArrayList;
import java.io.BufferedReader;
import java.io.StringReader;
import java.io.IOException;
import cc.mallet.grmm.types.*;
import cc.mallet.grmm.util.ModelReader;
import cc.mallet.types.MatrixOps;
import cc.mallet.util.Randoms;
/**
* $Id: TestUniformFactor.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestUniformFactor extends TestCase {
public TestUniformFactor (String name)
{
super (name);
}
public void testVarSet ()
{
Variable var = new Variable (Variable.CONTINUOUS);
Factor f = new UniformFactor (var, -1.0, 1.5);
assertEquals (1, f.varSet ().size ());
assertTrue (f.varSet().contains (var));
}
public void testSample ()
{
Variable var = new Variable (Variable.CONTINUOUS);
Randoms r = new Randoms (2343);
Factor f = new UniformFactor (var, -1.0, 1.5);
TDoubleArrayList lst = new TDoubleArrayList ();
for (int i = 0; i < 10000; i++) {
Assignment assn = f.sample (r);
lst.add (assn.getDouble (var));
}
double[] vals = lst.toNativeArray ();
double mean = MatrixOps.mean (vals);
assertEquals (0.25, mean, 0.01);
}
static String mdlstr = "VAR u1 u2 : continuous\n" +
"u1 ~ Uniform 0.0 10.0\n" +
"u2 ~ Uniform 5.0 7.0\n";
public void testSliceInFg () throws IOException
{
ModelReader reader = new ModelReader ();
FactorGraph fg = reader.readModel (new BufferedReader (new StringReader (mdlstr)));
Variable u1 = fg.findVariable ("u1");
Variable u2 = fg.findVariable ("u2");
Assignment assn = new Assignment (new Variable[] { u1, u2 }, new double[] { 6.0, 6.0 });
FactorGraph fg2 = (FactorGraph) fg.slice (assn);
fg2.dump ();
assertEquals (2, fg2.factors ().size ());
assertEquals (1.0/20, fg2.value (new Assignment ()), 1e-5);
fg2.addFactor (new ConstantFactor (10.0));
assertEquals (0.5, fg2.value (new Assignment ()), 1e-5);
}
/**
* @return a <code>TestSuite</code>
*/
public static TestSuite suite ()
{
return new TestSuite (TestUniformFactor.class);
}
public static void main (String[] args)
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestUniformFactor (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,928 | 27.715686 | 92 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestBetaFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import gnu.trove.TDoubleArrayList;
import java.io.IOException;
import java.io.BufferedReader;
import java.io.StringReader;
import cc.mallet.grmm.types.*;
import cc.mallet.grmm.util.ModelReader;
import cc.mallet.types.MatrixOps;
import cc.mallet.util.Randoms;
/**
* $Id: TestBetaFactor.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestBetaFactor extends TestCase {
public TestBetaFactor (String name)
{
super (name);
}
public void testVarSet ()
{
Variable var = new Variable (Variable.CONTINUOUS);
Factor f = new BetaFactor (var, 0.5, 0.5);
assertEquals (1, f.varSet ().size ());
assertTrue (f.varSet().contains (var));
}
public void testValue ()
{
Variable var = new Variable (Variable.CONTINUOUS);
Factor f = new BetaFactor (var, 1.0, 1.2);
Assignment assn = new Assignment (var, 0.7);
assertEquals (0.94321, f.value(assn), 1e-5);
}
public void testSample ()
{
Variable var = new Variable (Variable.CONTINUOUS);
Randoms r = new Randoms (2343);
Factor f = new BetaFactor (var, 0.7, 0.5);
TDoubleArrayList lst = new TDoubleArrayList ();
for (int i = 0; i < 100000; i++) {
Assignment assn = f.sample (r);
lst.add (assn.getDouble (var));
}
double[] vals = lst.toNativeArray ();
double mean = MatrixOps.mean (vals);
assertEquals (0.7 / (0.5 + 0.7), mean, 0.01);
}
public void testSample2 ()
{
Variable var = new Variable (Variable.CONTINUOUS);
Randoms r = new Randoms (2343);
Factor f = new BetaFactor (var, 0.7, 0.5, 3.0, 8.0);
TDoubleArrayList lst = new TDoubleArrayList ();
for (int i = 0; i < 100000; i++) {
Assignment assn = f.sample (r);
lst.add (assn.getDouble (var));
}
double[] vals = lst.toNativeArray ();
double mean = MatrixOps.mean (vals);
assertEquals (5.92, mean, 0.01);
}
static String mdlstr = "VAR u1 u2 : continuous\n" +
"u1 ~ Beta 0.2 0.7\n" +
"u2 ~ Beta 1.0 0.3\n";
public void testSliceInFg () throws IOException
{
ModelReader reader = new ModelReader ();
FactorGraph fg = reader.readModel (new BufferedReader (new StringReader (TestBetaFactor.mdlstr)));
Variable u1 = fg.findVariable ("u1");
Variable u2 = fg.findVariable ("u2");
Assignment assn = new Assignment (new Variable[] { u1, u2 }, new double[] { 0.25, 0.85 });
FactorGraph fg2 = (FactorGraph) fg.slice (assn);
assertEquals (2, fg2.factors ().size ());
assertEquals (0.59261 * 1.13202, fg2.value (new Assignment ()), 1e-5);
}
/**
* @return a <code>TestSuite</code>
*/
public static TestSuite suite ()
{
return new TestSuite (TestBetaFactor.class);
}
public static void main (String[] args)
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestBetaFactor (args[i]));
}
} else {
theSuite = (TestSuite) TestBetaFactor.suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 3,585 | 28.393443 | 102 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestAssignmentIterator.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import cc.mallet.grmm.types.*;
import junit.framework.TestCase;
import junit.framework.Test;
import junit.framework.TestSuite;
/**
* Created: Aug 11, 2004
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestAssignmentIterator.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestAssignmentIterator extends TestCase {
/**
* Constructs a test case with the given name.
*/
public TestAssignmentIterator (String name)
{
super (name);
}
public void testSum ()
{
Variable vars [] = {
new Variable (2),
new Variable (2),
};
double[] probs = { 0.1, 10.3, 17, 0.5 };
TableFactor ptl = new TableFactor (vars, probs);
AssignmentIterator it = ptl.assignmentIterator ();
double total = 0;
while (it.hasNext ()) {
total += ptl.value (it);
it.advance ();
}
assertEquals (27.9, total, 0.01);
}
public void testLazyAssignment ()
{
Variable vars [] = {
new Variable (2),
new Variable (2),
};
double[] probs = { 0.1, 10.3, 17, 0.5 };
TableFactor ptl = new TableFactor (vars, probs);
AssignmentIterator it = ptl.assignmentIterator ();
it.advance ();
it.advance ();
Assignment assn = it.assignment ();
assertEquals (2, assn.size ());
assertEquals (1, assn.get (vars [0]));
assertEquals (0, assn.get (vars [1]));
}
public void testSparseMatrixN ()
{
Variable x1 = new Variable (2);
Variable x2 = new Variable (2);
Variable alpha = new Variable (Variable.CONTINUOUS);
Factor potts = new PottsTableFactor (x1, x2, alpha);
Assignment alphAssn = new Assignment (alpha, 1.0);
Factor tbl = potts.slice (alphAssn);
System.out.println (tbl.dumpToString ());
int j = 0;
double[] vals = new double[] { 0, -1, -1, 0 };
for (AssignmentIterator it = tbl.assignmentIterator (); it.hasNext ();) {
assertEquals (vals[j++], tbl.logValue (it), 1e-5);
it.advance ();
}
}
public static Test suite()
{
return new TestSuite(TestAssignmentIterator.class);
}
public static void main(String[] args) throws Exception
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite();
for (int i = 0; i < args.length; i++) {
theSuite.addTest(new TestAssignmentIterator(args[i]));
}
} else {
theSuite = (TestSuite) suite();
}
junit.textui.TestRunner.run(theSuite);
}
}
| 2,941 | 25.504505 | 85 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestUndirectedModel.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.*;
import org._3pq.jgrapht.UndirectedGraph;
import org._3pq.jgrapht.GraphHelper;
import cc.mallet.grmm.inference.RandomGraphs;
import cc.mallet.grmm.types.*;
import cc.mallet.grmm.util.Graphs;
import cc.mallet.util.ArrayUtils;
/**
* Created: Mar 17, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestUndirectedModel.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestUndirectedModel extends TestCase {
public TestUndirectedModel (String name)
{
super (name);
}
public void testOutputToDot () throws IOException
{
FactorGraph mdl = TestInference.createRandomGrid (3, 4, 2, new Random (4234));
PrintWriter out = new PrintWriter (new FileWriter (new File ("grmm-model.dot")));
mdl.printAsDot (out);
out.close ();
System.out.println ("Now you can open up grmm-model.dot in Graphviz.");
}
/**
* Tests that models can be created that have multiple factors over the same variable, and that
* potentialOfVertex returns the product in that case.
*/
public void testMultipleNodePotentials ()
{
Variable var = new Variable (2);
FactorGraph mdl = new FactorGraph (new Variable[]{var});
Factor ptl1 = new TableFactor (var, new double[]{0.5, 0.5});
mdl.addFactor (ptl1);
Factor ptl2 = new TableFactor (var, new double[]{0.25, 0.25});
mdl.addFactor (ptl2);
// verify that factorOf(var) throws when more than one factor contains the variable
try {
mdl.factorOf (var);
fail ();
} catch (RuntimeException e) {} // expected
List factors = mdl.allFactorsOf (var);
Factor total = TableFactor.multiplyAll (factors);
double[] expected = {0.125, 0.125};
assertTrue ("Arrays not equal\n Expected " + ArrayUtils.toString (expected)
+ "\n Actual " + ArrayUtils.toString (((TableFactor) total).toValueArray ()),
Arrays.equals (expected, ((TableFactor) total).toValueArray ()));
}
/**
* Tests that models can be created that have multiple factors over the same edge, and that
* potentialOfEdge returns the product in that case.
*/
public void testMultipleEdgePotentials ()
{
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable[] vars = new Variable[]{v1, v2};
FactorGraph mdl = new FactorGraph (vars);
Factor ptl1 = new TableFactor (vars, new double[]{0.5, 0.5, 0.5, 0.5});
mdl.addFactor (ptl1);
Factor ptl2 = new TableFactor (vars, new double[]{0.25, 0.25, 0.5, 0.5});
mdl.addFactor (ptl2);
try {
mdl.factorOf (v1, v2);
fail ();
} catch (RuntimeException e) {}
Collection factors = mdl.allFactorsContaining (new HashVarSet (vars));
assertEquals (2, factors.size ());
assertTrue (factors.contains (ptl1));
assertTrue (factors.contains (ptl2));
double[] vals = {0.125, 0.125, 0.25, 0.25};
Factor total = TableFactor.multiplyAll (factors);
Factor expected = new TableFactor (vars, vals);
assertTrue ("Arrays not equal\n Expected " + ArrayUtils.toString (vals)
+ "\n Actual " + ArrayUtils.toString (((TableFactor) total).toValueArray ()),
expected.almostEquals (total, 1e-10));
}
public void testPotentialConnections ()
{
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable v3 = new Variable (2);
Variable[] vars = new Variable[]{v1, v2, v3};
FactorGraph mdl = new FactorGraph ();
TableFactor ptl = new TableFactor (vars, new double [8]);
mdl.addFactor (ptl);
assertTrue (mdl.isAdjacent (v1, v2));
assertTrue (mdl.isAdjacent (v2, v3));
assertTrue (mdl.isAdjacent (v1, v3));
}
public void testThreeNodeModel ()
{
Random r = new Random (23534709);
FactorGraph mdl = new FactorGraph ();
Variable root = new Variable (2);
Variable childL = new Variable (2);
Variable childR = new Variable (2);
mdl.addFactor (root, childL, RandomGraphs.generateMixedPotentialValues (r, 1.5));
mdl.addFactor (root, childR, RandomGraphs.generateMixedPotentialValues (r, 1.5));
// assertTrue (mdl.isConnected (root, childL));
// assertTrue (mdl.isConnected (root, childR));
// assertTrue (mdl.isConnected (childL, childR));
assertTrue (mdl.isAdjacent (root, childR));
assertTrue (mdl.isAdjacent (root, childL));
assertTrue (!mdl.isAdjacent (childL, childR));
assertTrue (mdl.factorOf (root, childL) != null);
assertTrue (mdl.factorOf (root, childR) != null);
}
// Verify that potentialOfVertex and potentialOfEdge (which use
// caches) are consistent with the potentials set.
public void testUndirectedCaches ()
{
List models = TestInference.createTestModels ();
for (Iterator it = models.iterator (); it.hasNext ();) {
FactorGraph mdl = (FactorGraph) it.next ();
verifyCachesConsistent (mdl);
}
}
private void verifyCachesConsistent (FactorGraph mdl)
{
Factor pot, pot2, pot3;
for (Iterator it = mdl.factors ().iterator (); it.hasNext ();) {
pot = (Factor) it.next ();
// System.out.println("Testing model "+i+" potential "+pot);
Object[] vars = pot.varSet ().toArray ();
switch (vars.length) {
case 1:
pot2 = mdl.factorOf ((Variable) vars[0]);
assertTrue (pot == pot2);
break;
case 2:
Variable var1 = (Variable) vars[0];
Variable var2 = (Variable) vars[1];
pot2 = mdl.factorOf (var1, var2);
pot3 = mdl.factorOf (var2, var1);
assertTrue (pot == pot2);
assertTrue (pot2 == pot3);
break;
// Factors of size > 2 are not currently cached.
default:
break;
}
}
}
// Verify that potentialOfVertex and potentialOfEdge (which use
// caches) are consistent with the potentials set even if a vertex is removed.
public void testUndirectedCachesAfterRemove ()
{
List models = TestInference.createTestModels ();
for (Iterator mdlIt = models.iterator (); mdlIt.hasNext ();) {
FactorGraph mdl = (FactorGraph) mdlIt.next ();
mdl = (FactorGraph) mdl.duplicate ();
mdl.remove (mdl.get (0));
// Verify that indexing correct
for (Iterator it = mdl.variablesIterator (); it.hasNext ();) {
Variable var = (Variable) it.next ();
int idx = mdl.getIndex (var);
assertTrue (idx >= 0);
assertTrue (idx < mdl.numVariables ());
}
// Verify that caches consistent
verifyCachesConsistent (mdl);
}
}
public void testMdlToGraph ()
{
List models = TestInference.createTestModels ();
for (Iterator mdlIt = models.iterator (); mdlIt.hasNext ();) {
UndirectedModel mdl = (UndirectedModel) mdlIt.next ();
UndirectedGraph g = Graphs.mdlToGraph (mdl);
Set vertices = g.vertexSet ();
// check the number of vertices
assertEquals (mdl.numVariables (), vertices.size ());
// check the number of edges
int numEdgePtls = 0;
for (Iterator factorIt = mdl.factors ().iterator (); factorIt.hasNext ();) {
Factor factor = (Factor) factorIt.next ();
if (factor.varSet ().size() == 2) numEdgePtls++;
}
assertEquals (numEdgePtls, g.edgeSet ().size ());
// check that the neighbors of each vertex contain at least some of what they're supposed to
Iterator it = vertices.iterator ();
while (it.hasNext ()) {
Variable var = (Variable) it.next ();
assertTrue (vertices.contains (var));
Set neighborsInG = new HashSet (GraphHelper.neighborListOf (g, var));
neighborsInG.add (var);
Iterator factorIt = mdl.allFactorsContaining (var).iterator ();
while (factorIt.hasNext ()) {
Factor factor = (Factor) factorIt.next ();
assertTrue (neighborsInG.containsAll (factor.varSet ()));
}
}
}
}
public void testFactorOfSet ()
{
Variable[] vars = new Variable [3];
for (int i = 0; i < vars.length; i++) {
vars[i] = new Variable (2);
}
Factor factor = new TableFactor (vars, new double[] { 0, 1, 2, 3, 4, 5, 6, 7 });
FactorGraph fg = new FactorGraph (vars);
fg.addFactor (factor);
assertTrue (factor == fg.factorOf (factor.varSet ()));
HashSet set = new HashSet (factor.varSet ());
assertTrue (factor == fg.factorOf (set));
set.remove (vars[0]);
assertTrue (null == fg.factorOf (set));
}
public static Test suite ()
{
return new TestSuite (TestUndirectedModel.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestUndirectedModel (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 9,578 | 31.252525 | 98 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestAbstractBeliefPropagation.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.*;
import java.util.List;
import java.util.ArrayList;
import cc.mallet.grmm.inference.Inferencer;
import cc.mallet.grmm.inference.TRP;
import cc.mallet.grmm.types.*;
import cc.mallet.util.Randoms;
/**
* $Id: TestAbstractBeliefPropagation.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $
*/
public class TestAbstractBeliefPropagation extends TestCase {
public TestAbstractBeliefPropagation (String name)
{
super (name);
}
/**
* @return a <code>TestSuite</code>
*/
public static TestSuite suite ()
{
return new TestSuite (TestAbstractBeliefPropagation.class);
}
public void testBadVariable ()
{
FactorGraph fg = createBoltzmannChain (5);
Assignment assn = fg.sampleContinuousVars (new Randoms (23423));
FactorGraph sliced = (FactorGraph) fg.slice (assn);
Inferencer bp = new TRP ();
bp.computeMarginals (sliced);
try {
bp.lookupMarginal (new Variable (2));
fail ("Expected exception");
} catch (IllegalArgumentException e) {
// expected
System.out.println ("OK: As expected, got exception "+e);
}
}
static FactorGraph createBoltzmannChain (int len)
{
List<Variable> vars = new ArrayList<Variable> ();
for (int i = 0; i < len; i++) {
Variable x_i = new Variable (2);
x_i.setLabel ("X_"+i);
vars.add (x_i);
}
List<Factor> factors = new ArrayList<Factor> (vars.size ());
// node factors
for (int i = 0; i < len; i++) {
Variable u = new Variable (Variable.CONTINUOUS);
u.setLabel ("U_"+i);
factors.add (new UniformFactor (u, -4.0, 4.0));
factors.add (new BoltzmannUnaryFactor (vars.get (i), u));
}
// edge factors
for (int i = 0; i < len-1; i++) {
Variable alpha = new Variable (Variable.CONTINUOUS);
alpha.setLabel ("ALPHA_"+i);
factors.add (new UniformFactor (alpha, -4.0, 4.0));
factors.add (new PottsTableFactor (vars.get (i), vars.get(i+1), alpha));
}
return new FactorGraph (factors);
}
public static void main (String[] args)
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestAbstractBeliefPropagation (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,846 | 26.640777 | 83 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestRandomGraphs.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.*;
import junit.textui.TestRunner;
import java.util.Random;
import java.util.Iterator;
import java.io.File;
import java.io.FileWriter;
import java.io.PrintWriter;
import java.io.IOException;
import cc.mallet.grmm.inference.*;
import cc.mallet.grmm.types.*;
/**
* Created: Mar 26, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestRandomGraphs.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $
*/
public class TestRandomGraphs extends TestCase {
public TestRandomGraphs (String name)
{
super (name);
}
public static Test suite ()
{
return new TestSuite (TestRandomGraphs.class);
}
public void testAttractiveGraphs () throws IOException
{
Random r = new Random (31421);
for (int rep = 0; rep < 5; rep++) {
FactorGraph mdl = RandomGraphs.randomAttractiveGrid (5, 0.5, r);
System.out.println ("************"); mdl.dump ();
TRP trp = TRP.createForMaxProduct ();
trp.computeMarginals (mdl);
Assignment assn = trp.bestAssignment ();
PrintWriter out = new PrintWriter (new FileWriter (new File ("attract."+rep+".dot")));
mdl.printAsDot (out, assn);
out.close ();
}
}
public void testRepulsiveGraphs () throws IOException
{
Random r = new Random (31421);
for (int rep = 0; rep < 5; rep++) {
FactorGraph mdl = RandomGraphs.randomRepulsiveGrid (5, 0.5, r);
TRP trp = TRP.createForMaxProduct ();
trp.computeMarginals (mdl);
Assignment assn = trp.bestAssignment ();
PrintWriter out = new PrintWriter (new FileWriter (new File ("repulse."+rep+".dot")));
mdl.printAsDot (out, assn);
out.close ();
}
}
public void testFrustratedGraphs () throws IOException
{
Random r = new Random (31421);
for (int rep = 0; rep < 5; rep++) {
FactorGraph mdl = RandomGraphs.randomFrustratedGrid (5, 0.5, r);
TRP trp = TRP.createForMaxProduct ();
trp.computeMarginals (mdl);
Assignment assn = trp.bestAssignment ();
PrintWriter out = new PrintWriter (new FileWriter (new File ("mixed."+rep+".dot")));
mdl.printAsDot (out, assn);
out.close ();
}
}
public void testFrustratedIsGrid () throws IOException
{
Random r = new Random (0);
for (int rep = 0; rep < 100; rep++) {
FactorGraph mdl = RandomGraphs.randomFrustratedGrid (10, 1.0, r);
// 100 variable factors + 180 edge factors
assertEquals (280, mdl.factors ().size ());
assertEquals (100, mdl.numVariables ());
int[] counts = new int [6];
for (int i = 0; i < mdl.numVariables (); i++) {
Variable var = mdl.get (i);
int degree = mdl.getDegree (var);
assertTrue ("Variable "+var+" has degree "+degree, (degree >= 3) && (degree <= 5));
counts[degree]++;
}
assertEquals (counts[0], 0);
assertEquals (counts[1], 0);
assertEquals (counts[2], 0);
assertEquals (counts[3], 4);
assertEquals (counts[4], 32);
assertEquals (counts[5], 64);
}
}
public void testUniformGrid ()
{
UndirectedGrid grid = (UndirectedGrid) RandomGraphs.createUniformGrid (3);
assertEquals (9, grid.numVariables ());
assertEquals (12, grid.factors ().size());
BruteForceInferencer inf = new BruteForceInferencer ();
TableFactor joint = (TableFactor) inf.joint (grid);
for (AssignmentIterator it = joint.assignmentIterator (); it.hasNext(); it.advance ()) {
assertEquals (-9 * Math.log (2), joint.logValue (it), 1e-3);
}
}
public void testUniformGridWithObservations ()
{
FactorGraph grid = RandomGraphs.createGridWithObs (
new RandomGraphs.UniformFactorGenerator (),
new RandomGraphs.UniformFactorGenerator (),
3);
assertEquals (18, grid.numVariables ());
assertEquals (12 + 9, grid.factors ().size());
Inferencer inf = new LoopyBP ();
inf.computeMarginals (grid);
for (Iterator it = grid.variablesIterator (); it.hasNext ();) {
Variable var = (Variable) it.next ();
Factor marg = inf.lookupMarginal (var);
for (AssignmentIterator assnIt = marg.assignmentIterator (); assnIt.hasNext();) {
assertEquals (-Math.log (2), marg.logValue (assnIt), 1e-3);
assnIt.advance ();
}
}
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestRandomGraphs (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
TestRunner.run (theSuite);
}
}
| 5,141 | 30.546012 | 92 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestTRP.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.*;
import java.util.Iterator;
import java.util.Random;
import cc.mallet.grmm.inference.RandomGraphs;
import cc.mallet.grmm.inference.TRP;
import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.FactorGraph;
/**
* $Id: TestTRP.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $
*/
public class TestTRP extends TestCase {
public TestTRP (String name)
{
super (name);
}
public void testEarlyStopping ()
{
FactorGraph grid = RandomGraphs.randomAttractiveGrid (5, 0.5, new Random (2413421));
TRP trp = new TRP (new TRP.IterationTerminator (1));
trp.setRandomSeed (14312341);
trp.computeMarginals (grid);
boolean oneIsDifferent = false;
// check no exceptions thrown when asking for all marginals,
    //  and check that at least one factor's belief has changed
// from the choice at zero iterations.
for (Iterator it = grid.factorsIterator (); it.hasNext();) {
Factor f = (Factor) it.next ();
Factor marg = trp.lookupMarginal (f.varSet ());// test no exception thrown
if (!marg.almostEquals (f.duplicate ().normalize ())) {
oneIsDifferent = true;
}
}
assertTrue (oneIsDifferent);
}
/**
* @return a <code>TestSuite</code>
*/
public static TestSuite suite ()
{
return new TestSuite (TestTRP.class);
}
public static void main (String[] args)
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestTRP (args[i]));
}
} else {
theSuite = suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,122 | 26.217949 | 88 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestMIntInt2ObjectMap.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.*;
import java.io.IOException;
import cc.mallet.grmm.util.MIntInt2ObjectMap;
import cc.mallet.types.tests.TestSerializable;
/**
* Created: Dec 14, 2005
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: TestMIntInt2ObjectMap.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestMIntInt2ObjectMap extends TestCase {
public TestMIntInt2ObjectMap (String name)
{
super (name);
}
public static Test suite ()
{
return new TestSuite (TestMIntInt2ObjectMap.class);
}
public void testReverse ()
{
MIntInt2ObjectMap map = new MIntInt2ObjectMap ();
map.put (0, 2, "A");
map.put (2, 0, "a");
map.put (0, 5, "C");
map.put (3, 1, "D");
map.put (2, 0, "aa");
assertEquals (4, map.size ());
assertEquals ("A", map.get (0, 2));
assertEquals ("aa", map.get (2, 0));
}
public void testSerializable () throws IOException, ClassNotFoundException
{
MIntInt2ObjectMap map = new MIntInt2ObjectMap ();
map.put (0, 2, "A");
map.put (2, 0, "a");
map.put (0, 5, "C");
map.put (3, 1, "D");
map.put (2, 0, "aa");
MIntInt2ObjectMap map2 = (MIntInt2ObjectMap) TestSerializable.cloneViaSerialization (map);
assertEquals (4, map2.size ());
assertEquals ("A", map2.get (0, 2));
assertEquals ("aa", map2.get (2, 0));
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestMIntInt2ObjectMap (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,208 | 26.962025 | 94 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestLogTableFactor.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import java.io.IOException;
import java.util.Set;
import cc.mallet.grmm.types.*;
import cc.mallet.types.SparseMatrixn;
import cc.mallet.types.tests.TestSerializable;
import cc.mallet.util.ArrayUtils;
import cc.mallet.util.Maths;
import cc.mallet.util.Randoms;
/**
* Created: Aug 17, 2004
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: TestLogTableFactor.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestLogTableFactor extends TestCase {
public TestLogTableFactor (String name)
{
super (name);
}
public void testTimesTableFactor ()
{
Variable var = new Variable (4);
double[] vals = new double[]{ 2.0, 4.0, 6.0, 8.0 };
double[] vals2 = new double [] { 0.5, 0.5, 0.5, 0.5 };
double[] vals3 = new double [] { 1, 2, 3, 4, };
TableFactor ans = new TableFactor (var, vals3);
TableFactor ptl1 = new TableFactor (var, vals);
LogTableFactor lptl2 = LogTableFactor.makeFromValues (var, vals2);
ptl1.multiplyBy (lptl2);
assertTrue (ans.almostEquals (ptl1));
}
public void testTblTblPlusEquals ()
{
Variable var = new Variable (4);
double[] vals = new double[]{ 2.0, 4.0, 6.0, 8.0 };
double[] vals2 = new double [] { 0.25, 0.5, 0.75, 1.0 };
double[] vals3 = new double [] { 2.25, 4.5, 6.75, 9.0, };
LogTableFactor ans = LogTableFactor.makeFromValues (var, vals3);
LogTableFactor ptl1 = LogTableFactor.makeFromValues (var, vals);
LogTableFactor ptl2 = LogTableFactor.makeFromValues (var, vals2);
ptl1.plusEquals (ptl2);
assertTrue (ans.almostEquals (ptl1));
}
public void testMultiplyByLogSpace ()
{
Variable var = new Variable (4);
double[] vals = new double[]{ 2.0, 4.0, 6.0, 8.0 };
double[] vals2 = new double [] { 0.5, 0.5, 0.5, 0.5 };
double[] vals3 = new double [] { 1, 2, 3, 4, };
TableFactor ans = new TableFactor (var, vals3);
TableFactor ptl1 = new TableFactor (var, vals);
TableFactor ptl2 = new TableFactor (var, vals2);
ptl1.multiplyBy (ptl2);
assertTrue (ans.almostEquals (ptl1));
TableFactor ptl3 = new TableFactor (var, vals);
LogTableFactor ptl4 = LogTableFactor.makeFromValues (var, vals2);
ptl3.multiplyBy (ptl4);
assertTrue (ptl3.almostEquals (ptl1));
TableFactor ptl5 = new TableFactor (var, vals);
LogTableFactor ptl6 = LogTableFactor.makeFromValues (var, vals2);
ptl6.multiplyBy (ptl5);
assertTrue (ptl6.almostEquals (ans));
LogTableFactor ptl7 = LogTableFactor.makeFromValues (var, vals);
LogTableFactor ptl8 = LogTableFactor.makeFromValues (var, vals2);
ptl8.multiplyBy (ptl7);
assertTrue (ptl8.almostEquals (ans));
}
public void testDivideByLogSpace ()
{
Variable var = new Variable (4);
double[] vals = new double[]{ 2.0, 4.0, 6.0, 8.0 };
double[] vals2 = new double [] { 0.5, 0.5, 0.5, 0.5 };
double[] vals3 = new double [] { 4, 8, 12, 16, };
TableFactor ans = new TableFactor (var, vals3);
TableFactor ptl1 = new TableFactor (var, vals);
TableFactor ptl2 = new TableFactor (var, vals2);
ptl1.divideBy (ptl2);
assertTrue (ans.almostEquals (ptl1));
TableFactor ptl3 = new TableFactor (var, vals);
LogTableFactor ptl4 = LogTableFactor.makeFromValues (var, vals2);
ptl3.divideBy (ptl4);
assertTrue (ptl3.almostEquals (ans));
LogTableFactor ptl5 = LogTableFactor.makeFromValues (var, vals);
TableFactor ptl6 = new TableFactor (var, vals2);
ptl5.divideBy (ptl6);
assertTrue (ptl5.almostEquals (ans));
LogTableFactor ptl7 = LogTableFactor.makeFromValues (var, vals);
LogTableFactor ptl8 = LogTableFactor.makeFromValues (var, vals2);
ptl7.divideBy (ptl8);
assertTrue (ptl7.almostEquals (ans));
}
public void testEntropyLogSpace ()
{
Variable v1 = new Variable (2);
TableFactor ptl = new TableFactor (v1, new double[] { 0.3, 0.7 });
double entropy = ptl.entropy ();
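    // Expected entropy in nats: -(0.3 ln 0.3 + 0.7 ln 0.7) ~= 0.6109.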
assertEquals (0.61086, entropy, 1e-3);
LogTableFactor ptl2 = LogTableFactor.makeFromValues (v1, new double[] { 0.3, 0.7 });
double entropy2 = ptl2.entropy ();
assertEquals (0.61086, entropy2, 1e-3);
}
// fails
public void ignoreTestSerialization () throws IOException, ClassNotFoundException
{
Variable v1 = new Variable (2);
Variable v2 = new Variable (3);
Variable[] vars = { v1, v2 };
double[] vals = new double[]{ 2.0, 4.0, 6.0, 3, 5, 7 };
LogTableFactor ptl = LogTableFactor.makeFromLogValues (vars, vals);
LogTableFactor ptl2 = (LogTableFactor) TestSerializable.cloneViaSerialization (ptl);
Set varset1 = ptl.varSet();
Set varset2 = ptl2.varSet();
assertTrue (!varset1.contains (varset2)); // Variables deep-cloned
    // There's no way to get directly at the matrices...!
comparePotentialValues (ptl, ptl2);
LogTableFactor marg1 = (LogTableFactor) ptl.marginalize (v1);
LogTableFactor marg2 = (LogTableFactor) ptl2.marginalize (ptl2.findVariable (v1.getLabel ()));
comparePotentialValues (marg1, marg2);
}
private void comparePotentialValues (LogTableFactor ptl, LogTableFactor ptl2)
{
AssignmentIterator it1 = ptl.assignmentIterator ();
AssignmentIterator it2 = ptl2.assignmentIterator ();
while (it1.hasNext ()) {
      assertTrue (ptl.value (it1) == ptl2.value (it2));
it1.advance (); it2.advance ();
}
}
public void testExtractMaxLogSpace ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
LogTableFactor ptl = LogTableFactor.makeFromValues (vars, new double[]{1, 2, 3, 4});
LogTableFactor ptl2 = (LogTableFactor) ptl.extractMax (vars[1]);
assertEquals ("FAILURE: Potential has too many vars.\n "+ptl2, 1, ptl2.varSet ().size ());
assertTrue ("FAILURE: Potential does not contain "+vars[1]+":\n "+ptl2, ptl2.varSet ().contains (vars[1]));
double[] expected = new double[] { 3, 4 };
assertTrue ("FAILURE: Potential has incorrect values. Expected "+ArrayUtils.toString (expected)+"was "+ptl2,
Maths.almostEquals (ptl2.toValueArray (), expected, 1e-5));
}
public void testLogValue ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
LogTableFactor ptl = LogTableFactor.makeFromValues (vars, new double[] { 1, 2, 3, 4 });
Assignment assn = new Assignment (vars, new int [vars.length]);
assertEquals (0, ptl.logValue (assn), 1e-5);
assertEquals (0, ptl.logValue (ptl.assignmentIterator()), 1e-5);
assertEquals (0, ptl.logValue (0), 1e-5);
assertEquals (1, ptl.value (assn), 1e-5);
assertEquals (1, ptl.value (ptl.assignmentIterator()), 1e-5);
assertEquals (1, ptl.value (0), 1e-5);
LogTableFactor ptl2 = LogTableFactor.makeFromLogValues (vars, new double[] { 0, Math.log (2), Math.log (3), Math.log (4) });
Assignment assn2 = new Assignment (vars, new int [vars.length]);
assertEquals (0, ptl2.logValue (assn2), 1e-5);
assertEquals (0, ptl2.logValue (ptl2.assignmentIterator()), 1e-5);
assertEquals (0, ptl2.logValue (0), 1e-5);
assertEquals (1, ptl2.value (assn2), 1e-5);
assertEquals (1, ptl2.value (ptl2.assignmentIterator()), 1e-5);
assertEquals (1, ptl2.value (0), 1e-5);
}
public void testOneVarSlice ()
{
double[] vals = { 0.0, 1.3862943611198906, 0.6931471805599453, 1.791759469228055 };
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable[] vars = new Variable[]{v1, v2};
Factor ptl = LogTableFactor.makeFromLogValues(vars, vals);
Assignment assn = new Assignment (v1, 0);
LogTableFactor sliced = (LogTableFactor) ptl.slice (assn);
LogTableFactor expected = LogTableFactor.makeFromValues (v2, new double[] { 1.0, 4.0 });
comparePotentialValues (sliced, expected);
assertEquals (1, assn.varSet ().size ());
}
public void testTwoVarSlice ()
{
double[] vals = { 0.0, 1, 2, 3, 4, 5, 6, 7 };
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable v3 = new Variable (2);
Variable[] vars = new Variable[]{v1, v2, v3};
Factor ptl = LogTableFactor.makeFromValues(vars, vals);
Assignment assn = new Assignment (v3, 0);
LogTableFactor sliced = (LogTableFactor) ptl.slice (assn);
LogTableFactor expected = LogTableFactor.makeFromValues (new Variable[] { v1, v2 }, new double[] { 0, 2, 4, 6 });
comparePotentialValues (sliced, expected);
}
public void testMultiVarSlice ()
{
double[] vals = { 0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable v3 = new Variable (2);
Variable v4 = new Variable (2);
Variable[] vars = new Variable[]{v1, v2, v3, v4};
Factor ptl = LogTableFactor.makeFromValues(vars, vals);
System.out.println (ptl);
Assignment assn = new Assignment (v4, 0);
LogTableFactor sliced = (LogTableFactor) ptl.slice (assn);
LogTableFactor expected = LogTableFactor.makeFromValues (new Variable[] { v1, v2, v3 }, new double[] { 0, 2, 4, 6, 8, 10, 12, 14 });
comparePotentialValues (sliced, expected);
}
public void testSparseValueAndLogValue ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
int[] szs = { 2, 2 };
int[] idxs1 = new int[] { 1, 3 };
double[] vals1 = new double[]{ 4.0, 8.0 };
LogTableFactor ptl1 = LogTableFactor.makeFromMatrix (vars, new SparseMatrixn (szs, idxs1, vals1));
AssignmentIterator it = ptl1.assignmentIterator ();
assertEquals (1, it.indexOfCurrentAssn ());
assertEquals (Math.log (4), ptl1.logValue (it), 1e-5);
assertEquals (Math.log (4), ptl1.logValue (it.assignment ()), 1e-5);
assertEquals (4, ptl1.value (it), 1e-5);
assertEquals (4, ptl1.value (it.assignment ()), 1e-5);
it = ptl1.varSet ().assignmentIterator ();
assertEquals (0, it.indexOfCurrentAssn ());
assertEquals (Double.NEGATIVE_INFINITY, ptl1.logValue (it), 1e-5);
assertEquals (Double.NEGATIVE_INFINITY, ptl1.logValue (it.assignment ()), 1e-5);
assertEquals (0, ptl1.value (it), 1e-5);
assertEquals (0, ptl1.value (it.assignment ()), 1e-5);
}
public void testSparseMultiplyLogSpace ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
int[] szs = { 2, 2 };
int[] idxs1 = new int[] { 0, 1, 3 };
double[] vals1 = new double[]{ 2.0, 4.0, 8.0 };
int[] idxs2 = new int[] { 0, 3 };
double[] vals2 = new double [] { 0.5, 0.5 };
double[] vals3 = new double [] { 1.0, 0, 4.0 };
LogTableFactor ptl1 = LogTableFactor.makeFromMatrix (vars, new SparseMatrixn (szs, idxs1, vals1));
LogTableFactor ptl2 = LogTableFactor.makeFromMatrix (vars, new SparseMatrixn (szs, idxs2, vals2));
LogTableFactor ans = LogTableFactor.makeFromMatrix (vars, new SparseMatrixn (szs, idxs1, vals3));
Factor ptl3 = ptl1.multiply (ptl2);
assertTrue ("Tast failed! Expected: "+ans+" Actual: "+ptl3, ans.almostEquals (ptl3));
}
public void testSparseDivideLogSpace ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
int[] szs = { 2, 2 };
int[] idxs1 = new int[] { 0, 1, 3 };
double[] vals1 = new double[]{ 2.0, 4.0, 8.0 };
int[] idxs2 = new int[] { 0, 3 };
double[] vals2 = new double [] { 0.5, 0.5 };
double[] vals3 = new double [] { 4.0, 0, 16.0 };
LogTableFactor ptl1 = LogTableFactor.makeFromMatrix (vars, new SparseMatrixn (szs, idxs1, vals1));
LogTableFactor ptl2 = LogTableFactor.makeFromMatrix (vars, new SparseMatrixn (szs, idxs2, vals2));
LogTableFactor ans = LogTableFactor.makeFromMatrix (vars, new SparseMatrixn (szs, idxs1, vals3));
ptl1.divideBy (ptl2);
assertTrue ("Tast failed! Expected: "+ans+" Actual: "+ptl1, ans.almostEquals (ptl1));
}
public void testSparseMarginalizeLogSpace ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
int[] szs = { 2, 2 };
int[] idxs1 = new int[] { 0, 1, 3 };
double[] vals1 = new double[]{ 2.0, 4.0, 8.0 };
LogTableFactor ptl1 = LogTableFactor.makeFromMatrix (vars, new SparseMatrixn (szs, idxs1, vals1));
LogTableFactor ans = LogTableFactor.makeFromValues (vars[0], new double[] { 6, 8 });
Factor ptl2 = ptl1.marginalize (vars[0]);
assertTrue ("Tast failed! Expected: "+ans+" Actual: "+ptl2+" Orig: "+ptl1, ans.almostEquals (ptl2));
}
public void testLogSample ()
{
Variable v = new Variable (2);
double[] vals = new double[] { -30, 0 };
LogTableFactor ptl = LogTableFactor.makeFromLogValues (v, vals);
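    // The unnormalized masses are exp(-30) and exp(0), so sampling should
    // essentially always return index 1.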
int idx = ptl.sampleLocation (new Randoms (43));
assertEquals (1, idx);
}
public void testPlusEquals ()
{
Variable var = new Variable (4);
// log 0, log 1, log 2, log 3
double[] vals = new double[] { Double.NEGATIVE_INFINITY, 0, 0.6931471805599453, 1.0986122886681098 };
LogTableFactor factor = LogTableFactor.makeFromLogValues (var, vals);
factor.plusEquals (0.1);
// log 0.1, log 1.1, log 2.1, log 3.1
double[] expected = new double[] { -2.3025850929940455, 0.09531017980432493, 0.7419373447293773, 1.1314021114911006 };
LogTableFactor ans = LogTableFactor.makeFromLogValues (var, expected);
assertTrue ("Error: expected "+ans.dumpToString ()+" but was "+factor.dumpToString (), factor.almostEquals (ans));
}
public void testRecenter ()
{
Variable var = new Variable (4);
double[] vals = new double[]{ 2.0, 4.0, 6.0, 8.0 };
LogTableFactor ltbl1 = LogTableFactor.makeFromValues (var, vals);
ltbl1.recenter ();
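    // Judging by the expected values below, recenter() rescales so that the largest
    // entry (8.0 here) maps to log value 0, i.e. it divides by the maximum value.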
double[] expected = new double[] { Math.log (0.25), Math.log(0.5), Math.log (0.75), 0 };
LogTableFactor ans = LogTableFactor.makeFromLogValues (var, expected);
assertTrue ("Error: expected "+ans.dumpToString ()+"but was "+ltbl1.dumpToString (), ans.almostEquals (ltbl1));
}
public void testRecenter2 ()
{
Variable var = new Variable (4);
double[] vals = new double[]{ 0, 1.4, 1.4, 0 };
LogTableFactor ltbl1 = LogTableFactor.makeFromLogValues (var, vals);
ltbl1.recenter ();
double[] expected = new double[]{ -1.4, 0, 0, -1.4 };
LogTableFactor ans = LogTableFactor.makeFromLogValues (var, expected);
assertTrue (!ltbl1.isNaN ());
assertTrue ("Error: expected "+ans.dumpToString ()+"but was "+ltbl1.dumpToString (), ans.almostEquals (ltbl1));
}
public static Test suite ()
{
return new TestSuite (TestLogTableFactor.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestLogTableFactor (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 15,423 | 35.377358 | 136 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestUniNormalFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import cc.mallet.grmm.types.*;
import cc.mallet.types.MatrixOps;
import cc.mallet.util.Randoms;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import gnu.trove.TDoubleArrayList;
/**
* $Id: TestUniNormalFactor.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestUniNormalFactor extends TestCase {
public TestUniNormalFactor (String name)
{
super (name);
}
public void testVarSet ()
{
Variable var = new Variable (Variable.CONTINUOUS);
Factor f = new UniNormalFactor (var, -1.0, 1.5);
assertEquals (1, f.varSet ().size ());
assertTrue (f.varSet().contains (var));
}
public void testValue ()
{
Variable var = new Variable (Variable.CONTINUOUS);
Factor f = new UniNormalFactor (var, -1.0, 2.0);
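    // The asserted values match a Gaussian density with mean -1 and variance 2:
    // 1/sqrt(2*pi*2) ~= 0.2821 at x = -1, and 0.2821 * exp(-(2.5)^2 / 4) ~= 0.0591 at
    // x = 1.5, which suggests the constructor arguments are (variable, mean, variance).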
Assignment assn1 = new Assignment (var, -1.0);
assertEquals (0.2821, f.value (assn1), 1e-4);
assertEquals (Math.log (0.2821), f.logValue (assn1), 1e-4);
Assignment assn2 = new Assignment (var, 1.5);
assertEquals (0.05913, f.value (assn2), 1e-4);
assertEquals (Math.log (0.05913), f.logValue (assn2), 1e-4);
}
public void testSample ()
{
Variable var = new Variable (Variable.CONTINUOUS);
Randoms r = new Randoms (2343);
Factor f = new UniNormalFactor (var, -1.0, 2.0);
TDoubleArrayList lst = new TDoubleArrayList ();
for (int i = 0; i < 10000; i++) {
Assignment assn = f.sample (r);
lst.add (assn.getDouble (var));
}
double[] vals = lst.toNativeArray ();
double mean = MatrixOps.mean (vals);
double std = MatrixOps.stddev (vals);
assertEquals (-1.0, mean, 0.025);
assertEquals (Math.sqrt(2.0), std, 0.01);
}
/**
* @return a <code>TestSuite</code>
*/
public static TestSuite suite ()
{
return new TestSuite (TestUniNormalFactor.class);
}
public static void main (String[] args)
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestUniNormalFactor (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,631 | 27.608696 | 76 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestListVarSet.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.TestCase;
import junit.framework.Test;
import junit.framework.TestSuite;
import java.util.Arrays;
import java.util.ArrayList;
import java.io.IOException;
import cc.mallet.grmm.types.*;
import cc.mallet.types.tests.TestSerializable;
/**
* Created: Aug 22, 2005
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: TestListVarSet.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestListVarSet extends TestCase {
public TestListVarSet (String name)
{
super (name);
}
public void testEqualsHashCode ()
{
Variable[] vars = new Variable [4];
for (int i = 0; i < vars.length; i++) {
vars[i] = new Variable(3);
}
Universe uni = vars[0].getUniverse ();
ListVarSet c1 = new ListVarSet (uni, Arrays.asList (vars));
ListVarSet c2 = new ListVarSet (uni, Arrays.asList (vars));
assertTrue(c1.equals (c2));
assertTrue(c2.equals (c1));
assertEquals (c1.hashCode(), c2.hashCode ());
}
public void testHashCodeByHashVarSet ()
{
Variable[] vars = new Variable [2];
for (int i = 0; i < vars.length; i++) {
vars[i] = new Variable(3);
}
Universe uni = vars[0].getUniverse ();
ListVarSet c1 = new ListVarSet (uni, Arrays.asList (vars));
HashVarSet c2 = new HashVarSet (vars);
assertTrue (c1.equals (c2));
assertEquals (c1.hashCode (), c2.hashCode ());
}
public void testEquals ()
{
Variable[] vars = new Variable [4];
for (int i = 0; i < vars.length; i++) {
vars[i] = new Variable(3);
}
Universe uni = vars[0].getUniverse ();
ListVarSet c = new ListVarSet (uni, Arrays.asList (new Variable[] { vars[0], vars[3] }));
HashVarSet c2 = new HashVarSet (c);
assertTrue (c2.equals (c));
assertTrue (c.equals (c2));
}
public void testContains ()
{
Variable[] vars = new Variable [4];
for (int i = 0; i < vars.length; i++) {
vars[i] = new Variable(3);
}
Universe uni = vars[0].getUniverse ();
ListVarSet c = new ListVarSet (uni, Arrays.asList (new Variable[] { vars[0], vars[3] }));
assertTrue (c.contains (vars[0]));
assertTrue (!c.contains (vars[1]));
assertTrue (!c.contains (vars[2]));
assertTrue (c.contains (vars[3]));
assertEquals (vars[0], c.get (0));
assertEquals (vars[3], c.get (1));
assertEquals (2, c.size ());
}
public void testSerialization () throws IOException, ClassNotFoundException
{
Variable[] vars_orig = new Variable [4];
for (int i = 0; i < vars_orig.length; i++) {
vars_orig[i] = new Variable(3);
}
Universe uni = vars_orig[0].getUniverse ();
ListVarSet c_orig = new ListVarSet (uni, Arrays.asList (new Variable[] { vars_orig[0], vars_orig[3] }));
ListVarSet c = (ListVarSet) TestSerializable.cloneViaSerialization (c_orig);
Universe uni_new = c.get (0).getUniverse ();
Variable[] vars = new Variable[] {
uni_new.get (0),
uni_new.get (1),
uni_new.get (2),
uni_new.get (3),
};
assertTrue (c.contains (vars[0]));
assertTrue (!c.contains (vars[1]));
assertTrue (!c.contains (vars[2]));
assertTrue (c.contains (vars[3]));
assertEquals (vars[0], c.get (0));
assertEquals (vars[3], c.get (1));
assertEquals (2, c.size ());
}
public void testAddAllOrdering ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] vars = new Variable[] { new Variable(2), new Variable (2) };
Universe uni = vars[0].getUniverse ();
ListVarSet vs = new ListVarSet (uni, Arrays.asList (vars));
checkOrdering (vs, vars);
}
}
public void testAddAllOrdering2 ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] vars = new Variable[] { new Variable(2), new Variable (2) };
Universe uni = vars[0].getUniverse ();
ListVarSet vs = new ListVarSet (uni, new ArrayList ());
vs.addAll (Arrays.asList (vars));
checkOrdering (vs, vars);
}
}
public void testAddAllOrdering3 ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] vars = new Variable[] { new Variable(2), new Variable (2) };
Universe uni = vars[0].getUniverse ();
ListVarSet vsOld = new ListVarSet (uni, Arrays.asList (vars));
ListVarSet vs = new ListVarSet (vsOld);
checkOrdering (vs, vars);
}
}
public void testIntersectionOrdering ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] varr1 = new Variable[] { new Variable(2), new Variable (2), new Variable (2) };
Variable[] varr2 = new Variable[] { varr1[0], varr1[1] };
Universe uni = varr1[0].getUniverse ();
ListVarSet vs1 = new ListVarSet (uni, Arrays.asList (varr1));
ListVarSet vs2 = new ListVarSet (uni, Arrays.asList (varr2));
VarSet vs_inter = new HashVarSet (vs1.intersection (vs2));
checkOrdering (vs_inter, varr2);
VarSet vs_inter2 = new HashVarSet (vs2.intersection (vs1));
checkOrdering (vs_inter2, varr2);
}
}
public void testIntersectionOrderingToHash ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] varr1 = new Variable[] { new Variable(2), new Variable (2), new Variable (2) };
Variable[] varr2 = new Variable[] { varr1[0], varr1[1] };
Universe uni = varr1[0].getUniverse ();
ListVarSet vs1 = new ListVarSet (uni, Arrays.asList (varr1));
VarSet vs2 = new HashVarSet (Arrays.asList (varr2));
VarSet vs_inter = new HashVarSet (vs1.intersection (vs2));
checkOrdering (vs_inter, varr2);
VarSet vs_inter2 = new HashVarSet (vs2.intersection (vs1));
checkOrdering (vs_inter2, varr2);
}
}
public void testIntersectionOrderingToBit ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] varr1 = new Variable[] { new Variable(2), new Variable (2), new Variable (2) };
Variable[] varr2 = new Variable[] { varr1[0], varr1[1] };
Universe uni = varr1[0].getUniverse ();
ListVarSet vs1 = new ListVarSet (uni, Arrays.asList (varr1));
VarSet vs2 = new BitVarSet (uni, Arrays.asList (varr2));
VarSet vs_inter = new HashVarSet (vs1.intersection (vs2));
checkOrdering (vs_inter, varr2);
VarSet vs_inter2 = new HashVarSet (vs2.intersection (vs1));
checkOrdering (vs_inter2, varr2);
}
}
private void checkOrdering (VarSet vs, Variable[] vars)
{
assertEquals (vars.length, vs.size ());
for (int i = 0; i < vars.length; i++) {
assertEquals (vars[i], vs.get (i));
}
}
public static Test suite ()
{
return new TestSuite (TestListVarSet.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestListVarSet (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 7,452 | 29.545082 | 108 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestFactorGraph.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.TestCase;
import junit.framework.Test;
import junit.framework.TestSuite;
import java.io.BufferedReader;
import java.io.StringReader;
import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Arrays;
import cc.mallet.grmm.inference.RandomGraphs;
import cc.mallet.grmm.types.*;
import cc.mallet.grmm.util.ModelReader;
import cc.mallet.types.MatrixOps;
import cc.mallet.util.Randoms;
import cc.mallet.util.Timing;
/**
* Created: Mar 17, 2005
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: TestFactorGraph.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $
*/
public class TestFactorGraph extends TestCase {
private Variable[] vars;
private TableFactor tbl1;
private TableFactor tbl2;
private TableFactor tbl3;
private LogTableFactor ltbl1;
private LogTableFactor ltbl2;
public TestFactorGraph (String name)
{
super (name);
}
protected void setUp () throws Exception
{
vars = new Variable[] {
new Variable (2),
new Variable (2),
new Variable (2),
new Variable (2),
};
tbl1 = new TableFactor (new Variable[] { vars[0], vars[1] }, new double[] { 0.8, 0.1, 0.1, 0.8 });
tbl2 = new TableFactor (new Variable[] { vars[1], vars[2] }, new double[] { 0.2, 0.7, 0.8, 0.2 });
tbl3 = new TableFactor (new Variable[] { vars[2], vars[3] }, new double[] { 0.2, 0.4, 0.6, 0.4 });
ltbl1 = LogTableFactor.makeFromValues (new Variable[] { vars[0], vars[1] }, new double[] { 0.8, 0.1, 0.1, 0.8 });
ltbl2 = LogTableFactor.makeFromValues (new Variable[] { vars[1], vars[2] }, new double[] { 0.2, 0.7, 0.8, 0.2 });
}
public void testMultiplyBy ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
assertEquals (2, fg.factors ().size());
assertTrue (fg.factors ().contains (tbl1));
assertTrue (fg.factors ().contains (tbl2));
assertEquals (3, fg.numVariables ());
assertTrue (fg.variablesSet ().contains (vars[0]));
assertTrue (fg.variablesSet ().contains (vars[1]));
assertTrue (fg.variablesSet ().contains (vars[2]));
}
public void testNumVariables ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
assertEquals (3, fg.numVariables ());
}
public void testMultiply ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
FactorGraph fg2 = (FactorGraph) fg.multiply (tbl3);
assertEquals (2, fg.factors ().size());
assertEquals (3, fg2.factors ().size());
assertTrue (!fg.factors ().contains (tbl3));
assertTrue (fg2.factors ().contains (tbl3));
}
public void testValue ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
Assignment assn = new Assignment (fg.varSet ().toVariableArray (), new int[] { 0, 1, 0 });
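    // Assuming the usual row-major indexing (last variable varying fastest), this
    // assignment picks out tbl1(v0=0, v1=1) = 0.1 and tbl2(v1=1, v2=0) = 0.8,
    // so the product is 0.08.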
assertEquals (0.08, fg.value (assn), 1e-5);
}
public void testMarginalize ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
Factor marg = fg.marginalize (vars[1]);
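    // Marginal over vars[1]: (0.8 + 0.1) * (0.2 + 0.7) = 0.81 for value 0 and
    // (0.1 + 0.8) * (0.8 + 0.2) = 0.9 for value 1.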
Factor expected = new TableFactor (vars[1], new double[] { 0.81, 0.9 });
assertTrue (expected.almostEquals (marg));
}
public void testSum ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
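    // Partition function: summing out vars[0] and vars[2] gives
    // 0.9 * 0.9 + 0.9 * 1.0 = 1.71.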
assertEquals (1.71, fg.sum (), 1e-5);
}
public void testNormalize ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
fg.normalize ();
assertEquals (1.0, fg.sum(), 1e-5);
}
public void testLogNormalize ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (ltbl1);
fg.multiplyBy (ltbl2);
fg.normalize ();
assertEquals (1.0, fg.sum(), 1e-5);
}
public void testEmbeddedFactorGraph ()
{
FactorGraph embeddedFg = new FactorGraph ();
embeddedFg.multiplyBy (tbl1);
embeddedFg.multiplyBy (tbl2);
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (embeddedFg);
fg.multiplyBy (tbl3);
assertEquals (4, fg.varSet ().size ());
assertEquals (2, fg.factors ().size ());
Assignment assn = new Assignment (fg.varSet ().toVariableArray (), new int [4]);
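    // At the all-zeros assignment the value is tbl1(0,0) * tbl2(0,0) * tbl3(0,0)
    // = 0.8 * 0.2 * 0.2 = 0.032.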
assertEquals (0.032, fg.value (assn), 1e-5);
AbstractTableFactor tbl = fg.asTable ();
assertEquals (4, tbl.varSet ().size ());
assertEquals (0.032, tbl.value (assn), 1e-5);
}
public void testAsTable ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
AbstractTableFactor actual = fg.asTable ();
AbstractTableFactor expected = (AbstractTableFactor) tbl1.multiply (tbl2);
assertTrue (expected.almostEquals (actual));
}
public void testTableTimesFg ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
Factor product = tbl3.multiply (fg);
assertTrue (product instanceof AbstractTableFactor);
assertEquals (4, product.varSet ().size ());
Assignment assn = new Assignment (product.varSet ().toVariableArray (), new int [4]);
assertEquals (0.032, product.value (assn), 1e-5);
}
public void testLogTableTimesFg ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
Factor product = ltbl1.multiply (fg);
assertTrue (product instanceof AbstractTableFactor);
assertEquals (3, product.varSet ().size ());
Assignment assn = new Assignment (product.varSet ().toVariableArray (), new int [3]);
assertEquals (0.128, product.value (assn), 1e-5);
}
public void testRemove ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
assertEquals (2, fg.getDegree (vars[1]));
fg.divideBy (tbl1);
assertEquals (2, fg.varSet ().size ());
Assignment assn = new Assignment (fg.varSet ().toVariableArray (), new int [2]);
assertEquals (0.2, fg.value (assn), 1e-5);
int nvs = 0;
for (Iterator it = fg.varSetIterator (); it.hasNext(); it.next ()) {
nvs++;
}
assertEquals (1, nvs);
assertEquals (1, fg.getDegree (vars[1]));
assertTrue (fg.get (0) != fg.get (1));
assertEquals (vars[1], fg.get (0));
assertEquals (vars[2], fg.get (1));
}
public void testRedundantDomains ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
fg.multiplyBy (ltbl1);
assertEquals (3, fg.varSet ().size ());
assertEquals ("Wrong factors in FG, was "+fg.dumpToString (), 3, fg.factors ().size ());
Assignment assn = new Assignment (fg.varSet ().toVariableArray (), new int [3]);
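    // At the all-zeros assignment: tbl1(0,0) * tbl2(0,0) * ltbl1(0,0) = 0.8 * 0.2 * 0.8 = 0.128.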
assertEquals (0.128, fg.value (assn), 1e-5);
}
private static String uniformMdlstr =
"VAR sigma u1 u2 : continuous\n" +
"VAR x1 x2 : 2\n" +
"sigma ~ Uniform -0.5 0.5\n" +
"u1 ~ Uniform -0.5 0.5\n" +
"u2 ~ Uniform -0.5 0.5\n" +
"x1 x2 ~ BinaryPair sigma\n" +
"x1 ~ Unary u1\n" +
"x2 ~ Unary u2\n";
public void testContinousSample () throws IOException
{
ModelReader reader = new ModelReader ();
FactorGraph fg = reader.readModel (new BufferedReader (new StringReader (uniformMdlstr)));
Randoms r = new Randoms (324143);
Assignment allAssn = new Assignment ();
for (int i = 0; i < 10000; i++) {
Assignment row = fg.sample (r);
allAssn.addRow (row);
}
Variable x1 = fg.findVariable ("x1");
Assignment assn1 = (Assignment) allAssn.marginalize (x1);
int[] col = assn1.getColumnInt (x1);
double mean = MatrixOps.sum (col) / ((double)col.length);
assertEquals (0.5, mean, 0.025);
}
private static String uniformMdlstr2 =
"VAR sigma u1 u2 : continuous\n" +
"VAR x1 x2 : 2\n" +
"sigma ~ Normal 0.0 0.2\n" +
"u1 ~ Normal 0.0 0.2\n" +
"u2 ~ Normal 0.0 0.2\n" +
"x1 x2 ~ BinaryPair sigma\n" +
"x1 ~ Unary u1\n" +
"x2 ~ Unary u2\n";
public void testContinousSample2 () throws IOException
{
ModelReader reader = new ModelReader ();
FactorGraph fg = reader.readModel (new BufferedReader (new StringReader (uniformMdlstr2)));
Randoms r = new Randoms (324143);
Assignment allAssn = new Assignment ();
for (int i = 0; i < 10000; i++) {
Assignment row = fg.sample (r);
allAssn.addRow (row);
}
Variable x1 = fg.findVariable ("x2");
Assignment assn1 = (Assignment) allAssn.marginalize (x1);
int[] col = assn1.getColumnInt (x1);
double mean = MatrixOps.sum (col) / ((double)col.length);
assertEquals (0.5, mean, 0.01);
Variable x2 = fg.findVariable ("x2");
Assignment assn2 = (Assignment) allAssn.marginalize (x2);
int[] col2 = assn2.getColumnInt (x2);
double mean2 = MatrixOps.sum (col2) / ((double)col2.length);
assertEquals (0.5, mean2, 0.025);
}
public void testAllFactorsOf () throws IOException
{
ModelReader reader = new ModelReader ();
FactorGraph fg = reader.readModel (new BufferedReader (new StringReader (uniformMdlstr2)));
Variable var = new Variable (2);
var.setLabel ("v0");
List lst = fg.allFactorsOf (var);
assertEquals (0, lst.size ());
}
public void testAllFactorsOf2 () throws IOException
{
Variable x1 = new Variable (2);
Variable x2 = new Variable (2);
FactorGraph fg = new FactorGraph ();
fg.addFactor (new TableFactor (x1));
fg.addFactor (new TableFactor (x2));
fg.addFactor (new TableFactor (new Variable[] { x1, x2 }));
List lst = fg.allFactorsOf (x1);
assertEquals (1, lst.size ());
for (Iterator it = lst.iterator (); it.hasNext ();) {
Factor f = (Factor) it.next ();
assertEquals (1, f.varSet().size());
assertTrue (f.varSet ().contains (x1));
}
HashVarSet vs = new HashVarSet (new Variable[]{x1, x2});
List lst2 = fg.allFactorsOf (vs);
assertEquals (1, lst2.size ());
Factor f = (Factor) lst2.get (0);
assertTrue (f.varSet ().equals (vs));
}
public void testAsTable2 ()
{
Factor f1 = new TableFactor (vars[0], new double[] { 0.6, 0.4 });
Factor f2 = new ConstantFactor (2.0);
FactorGraph fg = new FactorGraph (new Factor[] { f1, f2 });
AbstractTableFactor tbl = fg.asTable ();
assertTrue (Arrays.equals(new double[] { 0.6 * 2.0, 0.4 * 2.0 }, tbl.toValueArray ()));
}
public void testClear ()
{
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
assertEquals (3, fg.numVariables ());
assertEquals (2, fg.factors ().size ());
fg.clear ();
assertEquals (0, fg.numVariables ());
assertEquals (0, fg.factors ().size ());
for (int vi = 0; vi < tbl1.varSet ().size (); vi++) {
assertTrue (!fg.containsVar (tbl1.getVariable (vi)));
}
for (int vi = 0; vi < tbl2.varSet ().size (); vi++) {
assertTrue (!fg.containsVar (tbl2.getVariable (vi)));
}
}
public void testCacheExpanding ()
{
FactorGraph baseFg = RandomGraphs.randomFrustratedGrid (25, 1.0, new java.util.Random (3324879));
Assignment assn = new Assignment (baseFg, new int[baseFg.numVariables ()]);
double val = baseFg.logValue (assn);
Timing timing = new Timing ();
int numReps = 100;
for (int rep = 0; rep < numReps; rep++) {
FactorGraph fg = new FactorGraph (baseFg.numVariables ());
for (int fi = 0; fi < baseFg.factors().size(); fi++) {
fg.multiplyBy (baseFg.getFactor (fi));
}
assertEquals (val, fg.logValue (assn), 1e-5);
}
long time1 = timing.elapsedTime ();
timing.tick ("No-expansion time");
for (int rep = 0; rep < numReps; rep++) {
FactorGraph fg = new FactorGraph ();
for (int fi = 0; fi < baseFg.factors().size(); fi++) {
fg.multiplyBy (baseFg.getFactor (fi));
}
assertEquals (val, fg.logValue (assn), 1e-5);
}
long time2 = timing.elapsedTime ();
timing.tick ("With-expansion time");
assertTrue (time1 < time2);
}
public static Test suite ()
{
return new TestSuite (TestFactorGraph.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestFactorGraph (args[i]));
}
} else {
theSuite = (TestSuite) TestFactorGraph.suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 13,110 | 28.595937 | 117 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestTableFactor.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import java.io.IOException;
import java.util.Set;
import java.util.List;
import java.util.ArrayList;
import cc.mallet.grmm.types.*;
import cc.mallet.types.MatrixOps;
import cc.mallet.types.SparseMatrixn;
import cc.mallet.types.tests.TestSerializable;
import cc.mallet.util.ArrayUtils;
import cc.mallet.util.Maths;
import cc.mallet.util.Randoms;
/**
* Created: Aug 17, 2004
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: TestTableFactor.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $
*/
public class TestTableFactor extends TestCase {
public TestTableFactor (String name)
{
super (name);
}
public void testMultiplyMultiplyBy ()
{
Variable var = new Variable (4);
double[] vals = new double[]{ 2.0, 4.0, 6.0, 8.0 };
double[] vals2 = new double [] { 0.5, 0.5, 0.5, 0.5 };
double[] vals3 = new double [] { 1, 2, 3, 4, };
TableFactor ans = new TableFactor (var, vals3);
TableFactor ptl1 = new TableFactor (var, vals);
TableFactor ptl2 = new TableFactor (var, vals2);
Factor ptl3 = ptl1.multiply (ptl2);
ptl1.multiplyBy (ptl2);
assertTrue (ans.almostEquals (ptl1));
assertTrue (ans.almostEquals (ptl3));
}
public void testTblTblPlusEquals ()
{
Variable var = new Variable (4);
double[] vals = new double[]{ 2.0, 4.0, 6.0, 8.0 };
double[] vals2 = new double [] { 0.25, 0.5, 0.75, 1.0 };
double[] vals3 = new double [] { 2.25, 4.5, 6.75, 9.0, };
TableFactor ans = new TableFactor (var, vals3);
TableFactor ptl1 = new TableFactor (var, vals);
TableFactor ptl2 = new TableFactor (var, vals2);
ptl1.plusEquals (ptl2);
assertTrue (ans.almostEquals (ptl1));
}
public void testEntropy ()
{
Variable v1 = new Variable (2);
TableFactor ptl = new TableFactor (v1, new double[] { 0.3, 0.7 });
double entropy = ptl.entropy ();
assertEquals (0.61086, entropy, 1e-3);
LogTableFactor logFactor = LogTableFactor.makeFromValues (v1, new double[] { 0.3, 0.7 });
double entropy2 = logFactor.entropy ();
assertEquals (0.61086, entropy2, 1e-3);
}
// fails
public void ignoreTestSerialization () throws IOException, ClassNotFoundException
{
Variable v1 = new Variable (2);
Variable v2 = new Variable (3);
Variable[] vars = { v1, v2 };
double[] vals = new double[]{ 2.0, 4.0, 6.0, 3, 5, 7 };
TableFactor ptl = new TableFactor (vars, vals);
TableFactor ptl2 = (TableFactor) TestSerializable.cloneViaSerialization (ptl);
Set varset1 = ptl.varSet();
Set varset2 = ptl2.varSet();
assertTrue (!varset1.contains (varset2)); // Variables deep-cloned
    // There's no way to get directly at the matrices...!
comparePotentialValues (ptl, ptl2);
TableFactor marg1 = (TableFactor) ptl.marginalize (v1);
TableFactor marg2 = (TableFactor) ptl2.marginalize (ptl2.findVariable (v1.getLabel ()));
comparePotentialValues (marg1, marg2);
}
private void comparePotentialValues (TableFactor ptl, TableFactor ptl2)
{
AssignmentIterator it1 = ptl.assignmentIterator ();
AssignmentIterator it2 = ptl2.assignmentIterator ();
while (it1.hasNext ()) {
      assertTrue (ptl.value (it1) == ptl2.value (it2));
it1.advance (); it2.advance ();
}
}
public void testSample ()
{
Variable v = new Variable (3);
double[] vals = new double[] { 1, 3, 2 };
TableFactor ptl = new TableFactor (v, vals);
int[] sampled = new int [100];
Randoms r = new Randoms (32423);
for (int i = 0; i < sampled.length; i++) {
sampled[i] = ptl.sampleLocation (r);
}
double sum = MatrixOps.sum (vals);
double[] counts = new double [vals.length];
for (int i = 0; i < vals.length; i++) {
counts[i] = ArrayUtils.count (sampled, i);
}
MatrixOps.print (counts);
for (int i = 0; i < vals.length; i++) {
double prp = counts[i] / ((double) sampled.length);
assertEquals (vals[i] / sum, prp, 0.1);
}
}
public void testMarginalize ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
TableFactor ptl = new TableFactor (vars, new double[] { 1, 2, 3, 4});
TableFactor ptl2 = (TableFactor) ptl.marginalize (vars[1]);
assertEquals ("FAILURE: Potential has too many vars.\n "+ptl2, 1, ptl2.varSet ().size ());
assertTrue ("FAILURE: Potential does not contain "+vars[1]+":\n "+ptl2, ptl2.varSet ().contains (vars[1]));
double[] expected = new double[] { 4, 6 };
assertTrue ("FAILURE: Potential has incorrect values. Expected "+ArrayUtils.toString (expected)+"was "+ptl2,
Maths.almostEquals (ptl2.toValueArray (), expected, 1e-5));
}
public void testMarginalizeOut ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
TableFactor ptl = new TableFactor (vars, new double[] { 1, 2, 3, 4});
TableFactor ptl2 = (TableFactor) ptl.marginalizeOut (vars[0]);
assertEquals ("FAILURE: Potential has too many vars.\n "+ptl2, 1, ptl2.varSet ().size ());
assertTrue ("FAILURE: Potential does not contain "+vars[1]+":\n "+ptl2, ptl2.varSet ().contains (vars[1]));
double[] expected = new double[] { 4, 6 };
assertTrue ("FAILURE: Potential has incorrect values. Expected "+ArrayUtils.toString (expected)+"was "+ptl2,
Maths.almostEquals (ptl2.toValueArray (), expected, 1e-5));
}
public void testOneVarSlice ()
{
double[] vals = { 0.0, 1.3862943611198906, 0.6931471805599453, 1.791759469228055 };
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable[] vars = new Variable[]{v1, v2};
Factor ptl = new TableFactor (vars, vals);
Assignment assn = new Assignment (v1, 0);
TableFactor sliced = (TableFactor) ptl.slice (assn);
TableFactor expected = new TableFactor (v2, new double[] { 1.0, 4.0 });
comparePotentialValues (sliced, expected);
}
public void testTwoVarSlice ()
{
double[] vals = { 0.0, 1, 2, 3, 4, 5, 6, 7 };
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable v3 = new Variable (2);
Variable[] vars = new Variable[]{v1, v2, v3};
Factor ptl = new TableFactor (vars, vals);
Assignment assn = new Assignment (v3, 0);
TableFactor sliced = (TableFactor) ptl.slice (assn);
TableFactor expected = new TableFactor (new Variable[] {v1, v2}, new double[] { 0, 2, 4, 6 });
comparePotentialValues (sliced, expected);
}
public void testMultiVarSlice ()
{
double[] vals = { 0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable v3 = new Variable (2);
Variable v4 = new Variable (2);
Variable[] vars = new Variable[]{v1, v2, v3, v4};
Factor ptl = new TableFactor (vars, vals);
System.out.println (ptl);
Assignment assn = new Assignment (v4, 0);
TableFactor sliced = (TableFactor) ptl.slice (assn);
System.out.println (new TableFactor ((AbstractTableFactor) sliced));
TableFactor expected = new TableFactor (new Variable[] { v1,v2,v3 }, new double[] { 0, 2, 4, 6, 8, 10, 12, 14 });
comparePotentialValues (sliced, expected);
}
public void testLogMultiVarSlice ()
{
double[] vals = { 0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable v3 = new Variable (2);
Variable v4 = new Variable (2);
Variable[] vars = new Variable[]{v1, v2, v3, v4};
Factor ptl = LogTableFactor.makeFromValues (vars, vals);
System.out.println (ptl.dumpToString ());
Assignment assn = new Assignment (v4, 0);
LogTableFactor sliced = (LogTableFactor) ptl.slice (assn);
LogTableFactor expected = LogTableFactor.makeFromValues (new Variable[] { v1,v2,v3 }, new double[] { 0, 2, 4, 6, 8, 10, 12, 14 });
assertTrue ("Test failed. Expected: "+expected.dumpToString ()+"\nActual: "+sliced.dumpToString (),
expected.almostEquals (sliced));
}
public void testSparseMultiply ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
int[] szs = { 2, 2 };
int[] idxs1 = new int[] { 0, 1, 3 };
double[] vals1 = new double[]{ 2.0, 4.0, 8.0 };
int[] idxs2 = new int[] { 0, 3 };
double[] vals2 = new double [] { 0.5, 0.5 };
double[] vals3 = new double [] { 1.0, 0, 4.0 };
TableFactor ptl1 = new TableFactor (vars);
ptl1.setValues (new SparseMatrixn (szs, idxs1, vals1));
TableFactor ptl2 = new TableFactor (vars);
ptl2.setValues (new SparseMatrixn (szs, idxs2, vals2));
TableFactor ans = new TableFactor (vars);
ans.setValues (new SparseMatrixn (szs, idxs1, vals3));
Factor ptl3 = ptl1.multiply (ptl2);
assertTrue ("Tast failed! Expected: "+ans+" Actual: "+ptl3, ans.almostEquals (ptl3));
}
public void testSparseDivide ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
int[] szs = { 2, 2 };
int[] idxs1 = new int[] { 0, 1, 3 };
double[] vals1 = new double[]{ 2.0, 4.0, 8.0 };
int[] idxs2 = new int[] { 0, 3 };
double[] vals2 = new double [] { 0.5, 0.5 };
double[] vals3 = new double [] { 4.0, 0, 16.0 };
TableFactor ptl1 = new TableFactor (vars);
ptl1.setValues (new SparseMatrixn (szs, idxs1, vals1));
TableFactor ptl2 = new TableFactor (vars);
ptl2.setValues (new SparseMatrixn (szs, idxs2, vals2));
TableFactor ans = new TableFactor (vars);
ans.setValues (new SparseMatrixn (szs, idxs1, vals3));
ptl1.divideBy (ptl2);
assertTrue ("Tast failed! Expected: "+ans+" Actual: "+ptl1, ans.almostEquals (ptl1));
}
public void testSparseMarginalize ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
int[] szs = { 2, 2 };
int[] idxs1 = new int[] { 0, 1, 3 };
double[] vals1 = new double[]{ 2.0, 4.0, 8.0 };
TableFactor ptl1 = new TableFactor (vars);
ptl1.setValues (new SparseMatrixn (szs, idxs1, vals1));
TableFactor ans = new TableFactor (vars[0], new double[] { 6, 8 });
Factor ptl2 = ptl1.marginalize (vars[0]);
assertTrue ("Tast failed! Expected: "+ans+" Actual: "+ptl2+" Orig: "+ptl1, ans.almostEquals (ptl2));
}
public void testSparseExtractMax ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
int[] szs = { 2, 2 };
int[] idxs1 = new int[] { 0, 1, 3 };
double[] vals1 = new double[]{ 2.0, 4.0, 8.0 };
TableFactor ptl1 = new TableFactor (vars);
ptl1.setValues (new SparseMatrixn (szs, idxs1, vals1));
TableFactor ans = new TableFactor (vars[0], new double[] { 4, 8 });
Factor ptl2 = ptl1.extractMax (vars[0]);
assertTrue ("Tast failed! Expected: "+ans+" Actual: "+ptl2+ "Orig: "+ptl1, ans.almostEquals (ptl2));
}
public void testLogSample ()
{
Variable v = new Variable (2);
double[] vals = new double[] { -30, 0 };
LogTableFactor ptl = LogTableFactor.makeFromLogValues (v, vals);
int idx = ptl.sampleLocation (new Randoms (43));
assertEquals (1, idx);
}
public void testExp ()
{
Variable var = new Variable (4);
double[] vals = new double[] {2.0, 4.0, 6.0, 8.0};
double[] vals3 = new double [] { 4.0, 16.0, 36.0, 64.0 };
TableFactor ans = new TableFactor (var, vals3);
TableFactor ptl1 = new TableFactor (var, vals);
ptl1.exponentiate (2.0);
assertTrue ("Error: expected "+ans.dumpToString ()+" but was "+ptl1.dumpToString (), ptl1.almostEquals (ans));
}
public void testPlusEquals ()
{
Variable var = new Variable (4);
double[] vals = new double[]{ 2.0, 4.0, 6.0, 8.0 };
TableFactor factor = new TableFactor (var, vals);
factor.plusEquals (0.1);
double[] expected = new double[] { 2.1, 4.1, 6.1, 8.1 };
TableFactor ans = new TableFactor (var, expected);
assertTrue ("Error: expected "+ans.dumpToString ()+" but was "+factor.dumpToString (), factor.almostEquals (ans));
}
public void testMultiplyAll ()
{
for (int rep = 0; rep < 100; rep++) {
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable[] vars = new Variable[] { v1, v2 };
double[] vals = new double[] { 2.0, 4.0, 6.0, 8.0 };
double[] vals2 = new double [] { 0.5, 0.5, 0.5, 0.5 };
double[] vals3 = new double [] { 1, 2, 3, 4,};
TableFactor ans = new TableFactor (vars, vals3);
TableFactor ptl1 = new TableFactor (vars, vals);
TableFactor ptl2 = new TableFactor (vars, vals2);
Factor ptl3 = TableFactor.multiplyAll (new Factor[] { ptl1, ptl2 });
VarSet vs = ptl3.varSet ();
for (int i = 0; i < vars.length; i++) {
assertEquals (vars[i], vs.get (i));
}
assertTrue (ans.almostEquals (ptl3));
}
}
public void testExpandToContain ()
{
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable v3 = new Variable (2);
Variable[] vars = new Variable[] { v1, v2 };
double[] vals = new double[] { 2.0, 4.0, 6.0, 8.0 };
double[] vals2 = new double [] { 0.5, 0.5 };
TableFactor f1 = new TableFactor (vars, vals);
TableFactor f2 = new TableFactor (v3, vals2);
f1.multiplyBy (f2);
Variable[] allV = new Variable[] { v1, v2, v3 };
double[] exp = new double[] { 1, 1, 2, 2, 3, 3, 4, 4, };
TableFactor ans = new TableFactor (allV, exp);
System.out.println (f1.dumpToString ());
System.out.println (ans.dumpToString ());
assertTrue (ans.almostEquals (f1));
}
public static Test suite ()
{
return new TestSuite (TestTableFactor.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestTableFactor (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 14,565 | 32.408257 | 134 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestNormalFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import cc.mallet.grmm.types.*;
import cc.mallet.types.MatrixOps;
import cc.mallet.util.Randoms;
import junit.framework.*;
import gnu.trove.TDoubleArrayList;
import no.uib.cipr.matrix.Vector;
import no.uib.cipr.matrix.DenseVector;
import no.uib.cipr.matrix.Matrix;
import no.uib.cipr.matrix.DenseMatrix;
/**
* $Id: TestNormalFactor.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestNormalFactor extends TestCase {
public TestNormalFactor (String name)
{
super (name);
}
public void testSample ()
{
Variable v1 = new Variable (Variable.CONTINUOUS);
Variable v2 = new Variable (Variable.CONTINUOUS);
Randoms r = new Randoms (2343);
Vector mu = new DenseVector (new double[] { 1.0, 2.0 });
Matrix var = new DenseMatrix (new double[][] {{ 0.5, 2.0 }, { 0, 1 }});
// Matrix var = new DenseMatrix (new double[][] {{ 0.5, 2.0 }, { 2.0, 0.75 }});
VarSet vars = new HashVarSet (new Variable[] { v1, v2 });
Factor f = new NormalFactor (vars, mu, var);
TDoubleArrayList v1lst = new TDoubleArrayList ();
TDoubleArrayList v2lst = new TDoubleArrayList ();
for (int i = 0; i < 100000; i++) {
Assignment assn = f.sample (r);
v1lst.add (assn.getDouble (v1));
v2lst.add (assn.getDouble (v2));
}
checkMeanStd (v1lst, 1.0, Math.sqrt (1/0.5));
checkMeanStd (v2lst, 2.0, Math.sqrt (1/0.75));
}
void checkMeanStd (TDoubleArrayList ell, double mu, double sigma)
{
double[] vals = ell.toNativeArray ();
double mean1 = MatrixOps.mean (vals);
double std1 = MatrixOps.stddev (vals);
assertEquals (mu, mean1, 0.025);
assertEquals (sigma, std1, 0.01);
}
/**
* @return a <code>TestSuite</code>
*/
public static TestSuite suite ()
{
return new TestSuite (TestNormalFactor.class);
}
public static void main (String[] args)
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestNormalFactor (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,614 | 28.382022 | 82 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestFactors.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import cc.mallet.grmm.types.*;
import junit.framework.*;
/**
* Created: Mar 28, 2005
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: TestFactors.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestFactors extends TestCase {
public TestFactors (String name)
{
super (name);
}
public void testNormalizeAsCpt ()
{
double[] vals = { 1, 4, 2, 6 };
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
TableFactor ptl = new TableFactor (new Variable[] {v1, v2}, vals);
Factors.normalizeAsCpt (ptl, v1);
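    // normalizeAsCpt (ptl, v1) makes the entries sum to 1 over v1 for each value of v2:
    // the column {1, 2} becomes {1/3, 2/3} and {4, 6} becomes {0.4, 0.6}.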
comparePotentials (ptl, new double[] { 0.3333, 0.4, 0.6666, 0.6 });
}
public void testSparseNormalizeAsCpt ()
{
double[] vals = { 1, 4, 0, 0, 0, 0.5, 0, 0 };
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
Variable v3 = new Variable (2);
TableFactor ptl = new TableFactor (new Variable[] {v1, v2, v3}, vals);
Factors.normalizeAsCpt (ptl, v3);
comparePotentials (ptl, new double[] { 0.2, 0.8, 0, 0, 0, 1, 0, 0 });
}
public void testNormalizeAsCptLogSpace ()
{
double[] vals = { 0.0, 1.3862943611198906, 0.6931471805599453, 1.791759469228055 };
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
AbstractTableFactor ptl = LogTableFactor.makeFromLogValues(new Variable[] { v1, v2 }, vals);
System.out.println (ptl);
Factors.normalizeAsCpt (ptl, v1);
System.out.println (ptl);
comparePotentials (ptl, new double[] { 0.3333, 0.4, 0.6666, 0.6 });
// comparePotentials (ptl, new double[] { -1.098712293668443, -0.916290731874155, -0.4055651131084978, -0.5108256237659907 });
}
private void comparePotentials (DiscreteFactor ptl, double[] expected)
{
double[] actual = ptl.toValueArray ();
assertEquals (expected.length, actual.length);
for (int i = 0; i < expected.length; i++) {
assertEquals (expected[i], actual[i], 0.001);
}
}
public void testRetainMass ()
{
Variable v = new Variable (4);
LogTableFactor ptl = LogTableFactor.makeFromValues (v, new double[] { 0.75, 0, 0.05, 0.2 });
TableFactor actual = Factors.retainMass (ptl, 0.9);
System.out.println (actual);
// comparePotentials (actual, new double[] { Math.log (0.75), Math.log (0.2) });
}
public void testMutualInfo1 ()
{
Factor ptl1 = new TableFactor (new Variable (2), new double[] { 0.7, 0.3 });
Factor ptl2 = new TableFactor (new Variable (2), new double[] { 0.2, 0.8 });
Factor joint = ptl1.multiply (ptl2);
assertEquals (0.0, Factors.mutualInformation (joint), 1e-5);
}
public void testMutualInfo2 ()
{
Variable[] vars = new Variable[] { new Variable (2), new Variable (2) };
Factor joint = new TableFactor (vars, new double[] { 0.3, 0.2, 0.1, 0.4 });
System.out.println (joint.dumpToString ());
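    // Worked check: the marginals are p(v1) = {0.5, 0.5} and p(v2) = {0.4, 0.6}, so
    // MI = 0.3*ln(0.3/0.2) + 0.2*ln(0.2/0.3) + 0.1*ln(0.1/0.2) + 0.4*ln(0.4/0.3)
    //    ~= 0.086305 nats, the constant asserted below.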
assertEquals (0.08630462, Factors.mutualInformation (joint), 1e-5);
}
public void testMix ()
{
Variable var = new Variable (2);
AbstractTableFactor tf = new TableFactor (var, new double[] { 0.3, 0.7 });
AbstractTableFactor ltf = LogTableFactor.makeFromValues (var, new double[] { 0.5, 0.5 });
Factor mix = Factors.mix (tf, ltf, 0.5);
AbstractTableFactor ans = new TableFactor (var, new double[] { 0.4, 0.6 });
assertTrue (ans.almostEquals (mix));
}
public void testCorr ()
{
Variable var1 = new Variable (2);
Variable var2 = new Variable (2);
TableFactor f = new TableFactor (new Variable[] { var1, var2 }, new double[] { 0.3, 0.1, 0.2, 0.4 } );
double corr = Factors.corr (f);
    // E[v1*v2] - E[v1]*E[v2] = 0.4 - (0.6)(0.5) = 0.4 - 0.3 = 0.1
assertEquals (0.1, corr, 1e-5);
}
public void testLogErrorRange ()
{
Variable var1 = new Variable (2);
Variable var2 = new Variable (2);
TableFactor f1 = new TableFactor (new Variable[] { var1, var2 }, new double[] { 0.3, 0.1, 0.2, 0.4 } );
TableFactor f2 = new TableFactor (new Variable[] { var1, var2 }, new double[] { 0.2, 0.1, 0.4, 0.3 } );
assertEquals (Math.log (2), Factors.logErrorRange (f1, f2), 1e-10);
assertEquals (Math.log (2), Factors.logErrorRange (f2, f1), 1e-10);
TableFactor f3 = new TableFactor (new Variable[] { var1, var2 }, new double[] { 0.2, 0.4, 0.3, 0.1 } );
double exp = Math.log (4) - Math.log (1.5);
assertEquals (exp, Factors.logErrorRange (f1, f3), 1e-10);
assertEquals (exp, Factors.logErrorRange (f3, f1), 1e-10);
}
public static Test suite ()
{
return new TestSuite (TestFactors.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestFactors (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 5,346 | 33.057325 | 129 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestBitVarSet.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.TestCase;
import junit.framework.Test;
import junit.framework.TestSuite;
import java.util.Arrays;
import java.util.ArrayList;
import cc.mallet.grmm.types.*;
/**
* Created: Aug 22, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TestBitVarSet.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestBitVarSet extends TestCase {
public TestBitVarSet (String name)
{
super (name);
}
public void testEqualsHashCode ()
{
Variable[] vars = new Variable [4];
for (int i = 0; i < vars.length; i++) {
vars[i] = new Variable(3);
}
Universe uni = vars[0].getUniverse ();
BitVarSet c1 = new BitVarSet (uni, Arrays.asList (vars));
BitVarSet c2 = new BitVarSet (uni, Arrays.asList (vars));
assertTrue(c1.equals (c2));
assertTrue(c2.equals (c1));
assertEquals (c1.hashCode(), c2.hashCode ());
}
public void testAddAllOrdering ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] vars = new Variable[] { new Variable(2), new Variable (2) };
Universe uni = vars[0].getUniverse ();
BitVarSet vs = new BitVarSet (uni, Arrays.asList (vars));
checkOrdering (vs, vars);
}
}
public void testAddAllOrdering2 ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] vars = new Variable[] { new Variable(2), new Variable (2) };
Universe uni = vars[0].getUniverse ();
BitVarSet vs = new BitVarSet (uni, new ArrayList ());
vs.addAll (Arrays.asList (vars));
checkOrdering (vs, vars);
}
}
public void testAddAllOrdering3 ()
{
for (int rep = 0; rep < 1000; rep++) {
Variable[] vars = new Variable[] { new Variable(2), new Variable (2) };
Universe uni = vars[0].getUniverse ();
BitVarSet vsOld = new BitVarSet (uni, Arrays.asList (vars));
BitVarSet vs = new BitVarSet (vsOld);
checkOrdering (vs, vars);
}
}
private void checkOrdering (VarSet vs, Variable[] vars)
{
assertEquals (vars.length, vs.size ());
for (int i = 0; i < vars.length; i++) {
assertEquals (vars[i], vs.get (i));
}
}
public static Test suite ()
{
return new TestSuite (TestBitVarSet.class);
}
public static void main (String[] args) throws Throwable
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestBitVarSet (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 3,063 | 27.37037 | 77 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestInference.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import junit.framework.AssertionFailedError;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import java.util.*;
import java.util.Random;
import java.util.logging.Logger;
import java.io.IOException;
import java.io.StringReader;
import java.io.BufferedReader;
import cc.mallet.grmm.inference.*;
import cc.mallet.grmm.types.*;
import cc.mallet.grmm.util.GeneralUtils;
import cc.mallet.grmm.util.ModelReader;
import cc.mallet.types.Dirichlet;
import cc.mallet.types.Matrix;
import cc.mallet.types.Matrixn;
import cc.mallet.types.tests.TestSerializable;
import cc.mallet.util.*;
//import cc.mallet.util.Random;
import gnu.trove.TDoubleArrayList;
/**
* Torture tests of inference in GRMM. Well, actually, they're
* not all that torturous, but hopefully they're at least
* somewhat disconcerting.
*
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version $Id: TestInference.java,v 1.1 2007/10/22 21:37:40 mccallum Exp $
*/
public class TestInference extends TestCase {
private static Logger logger = MalletLogger.getLogger(TestInference.class.getName());
private static double APPX_EPSILON = 0.15;
final public Class[] algorithms = {
BruteForceInferencer.class,
VariableElimination.class,
JunctionTreeInferencer.class,
};
final public Class[] appxAlgs = {
TRP.class,
LoopyBP.class,
};
// only used for logJoint test for now
final public Class[] allAlgs = {
// BruteForceInferencer.class,
JunctionTreeInferencer.class,
TRP.class,
// VariableElimination.class,
LoopyBP.class,
};
final public Class[] treeAlgs = {
TreeBP.class,
};
List modelsList;
UndirectedModel[] models;
FactorGraph[] trees;
Factor[][] treeMargs;
public TestInference(String name)
{
super(name);
}
private static UndirectedModel createChainGraph()
{
Variable[] vars = new Variable[5];
UndirectedModel model = new UndirectedModel();
try {
// Add all variables to model
for (int i = 0; i < 5; i++) {
vars[i] = new Variable(2);
}
// Add some links
double probs[] = {0.9, 0.1, 0.1, 0.9};
for (int i = 0; i < 4; i++) {
Variable[] pair = { vars[i], vars[i + 1], };
TableFactor pot = new TableFactor (pair, probs);
model.addFactor (pot);
}
} catch (Exception e) {
e.printStackTrace();
assertTrue(false);
}
return model;
}
private static UndirectedModel createTriangle()
{
Variable[] vars = new Variable[3];
for (int i = 0; i < 3; i++) {
vars[i] = new Variable (2);
}
UndirectedModel model = new UndirectedModel (vars);
double[][] pots = new double[][] { { 0.2, 0.8, 0.1, 0.9 },
{ 0.7, 0.3, 0.5, 0.5 },
{ 0.6, 0.4, 0.8, 0.2 },
{ 0.35, 0.65 } };
// double[][] pots = new double[] [] { {
model.addFactor (vars[0], vars[1], pots[0]);
model.addFactor (vars[1], vars[2], pots[1]);
model.addFactor (vars[2], vars[0], pots[2]);
TableFactor pot = new TableFactor (new Variable[] { vars[0] }, pots[3]);
model.addFactor (pot);
return model;
}
private static TableFactor randomEdgePotential(Random r,
Variable v1, Variable v2)
{
int max1 = v1.getNumOutcomes();
int max2 = v2.getNumOutcomes();
Matrix phi = new Matrixn(new int[]{max1, max2});
for (int i = 0; i < v1.getNumOutcomes(); i++) {
for (int j = 0; j < v2.getNumOutcomes(); j++) {
phi.setValue(new int[]{i, j}, r.nextDouble ()); // rescale(r.nextDouble()));
}
}
return new TableFactor
(new Variable[]{v1, v2}, phi);
}
private static TableFactor randomNodePotential(Random r, Variable v)
{
int max = v.getNumOutcomes();
Matrix phi = new Matrixn(new int[]{max});
for (int i = 0; i < v.getNumOutcomes(); i++) {
phi.setSingleValue(i, rescale(r.nextDouble()));
}
return new TableFactor
(new Variable[]{v}, phi);
}
// scale d into range 0.2..0.8
private static double rescale(double d)
{
return 0.2 + 0.6 * d;
}
private static UndirectedModel createRandomGraph(int numV, int numOutcomes, Random r)
{
Variable[] vars = new Variable[numV];
for (int i = 0; i < numV; i++) {
vars[i] = new Variable(numOutcomes);
}
UndirectedModel model = new UndirectedModel(vars);
for (int i = 0; i < numV; i++) {
boolean hasOne = false;
for (int j = i + 1; j < numV; j++) {
if (r.nextBoolean()) {
hasOne = true;
model.addFactor (randomEdgePotential (r, vars[i], vars[j]));
}
}
// If vars [i] has no edge potential, add a node potential
// To keep things simple, we'll require the potential to be normalized.
if (!hasOne) {
Factor pot = randomNodePotential(r, vars[i]);
pot.normalize();
model.addFactor (pot);
}
}
// Ensure exactly one connected component
for (int i = 0; i < numV; i++) {
for (int j = i + 1; j < numV; j++) {
if (!model.isConnected(vars[i], vars[j])) {
Factor ptl = randomEdgePotential (r, vars[i], vars[j]);
model.addFactor (ptl);
}
}
}
return model;
}
public static UndirectedModel createRandomGrid(int w, int h, int maxOutcomes, Random r)
{
Variable[][] vars = new Variable[w][h];
UndirectedModel mdl = new UndirectedModel(w * h);
for (int i = 0; i < w; i++) {
for (int j = 0; j < h; j++) {
vars[i][j] = new Variable(r.nextInt(maxOutcomes - 1) + 2);
}
}
for (int i = 0; i < w; i++) {
for (int j = 0; j < h; j++) {
Factor ptl;
if (i < w - 1) {
ptl = randomEdgePotential (r, vars[i][j], vars[i + 1][j]);
mdl.addFactor (ptl);
}
if (j < h - 1) {
ptl = randomEdgePotential (r, vars[i][j], vars[i][j + 1]);
mdl.addFactor (ptl);
}
}
}
return mdl;
}
private UndirectedModel createRandomTree(int nnodes, int maxOutcomes, Random r)
{
Variable[] vars = new Variable[nnodes];
UndirectedModel mdl = new UndirectedModel(nnodes);
for (int i = 0; i < nnodes; i++) {
vars[i] = new Variable(r.nextInt(maxOutcomes - 1) + 2);
}
// Add some random edges
for (int i = 0; i < nnodes; i++) {
for (int j = i + 1; j < nnodes; j++) {
if (!mdl.isConnected(vars[i], vars[j]) && r.nextBoolean()) {
Factor ptl = randomEdgePotential (r, vars[i], vars[j]);
mdl.addFactor (ptl);
}
}
}
// Ensure exactly one connected component
for (int i = 0; i < nnodes; i++) {
for (int j = i + 1; j < nnodes; j++) {
if (!mdl.isConnected(vars[i], vars[j])) {
System.out.println ("forced edge: " + i + " " + j);
Factor ptl = randomEdgePotential (r, vars[i], vars[j]);
mdl.addFactor (ptl);
}
}
}
return mdl;
}
public static List createTestModels()
{
Random r = new Random(42);
// These models are all small so that we can run the brute force
// inferencer on them.
FactorGraph[] mdls = new FactorGraph[]{
createTriangle(),
createChainGraph(),
createRandomGraph(3, 2, r),
createRandomGraph(3, 3, r),
createRandomGraph(6, 3, r),
createRandomGraph(8, 2, r),
createRandomGrid(3, 2, 4, r),
createRandomGrid(4, 3, 2, r),
};
return new ArrayList(Arrays.asList(mdls));
}
public void testUniformJoint () throws Exception
{
FactorGraph mdl = RandomGraphs.createUniformChain (3);
double expected = -Math.log (8);
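    // The uniform chain here has 8 equally likely joint assignments (2^3 for three
    // binary variables), so every log-joint should equal -log 8.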
for (int i = 0; i < allAlgs.length; i++) {
Inferencer inf = (Inferencer) allAlgs[i].newInstance ();
inf.computeMarginals (mdl);
for (AssignmentIterator it = mdl.assignmentIterator (); it.hasNext ();) {
Assignment assn = it.assignment ();
double actual = inf.lookupLogJoint (assn);
assertEquals ("Incorrect joint for inferencer "+inf, expected, actual, 1e-5);
it.advance ();
}
}
}
public void testJointConsistent () throws Exception
{
for (int i = 0; i < allAlgs.length; i++) {
// for (int mdlIdx = 0; mdlIdx < models.length; mdlIdx++) {
{ int mdlIdx = 13;
Inferencer inf = (Inferencer) allAlgs[i].newInstance();
try {
FactorGraph mdl = models[mdlIdx];
inf.computeMarginals(mdl);
Assignment assn = new Assignment (mdl, new int [mdl.numVariables ()]);
assertEquals (Math.log (inf.lookupJoint (assn)), inf.lookupLogJoint (assn), 1e-5);
} catch (UnsupportedOperationException e) {
// LoopyBP only handles edge ptls
logger.warning("Skipping (" + mdlIdx + "," + i + ")\n" + e);
throw e;
// continue;
}
}
}
}
public void testFactorizedJoint() throws Exception
{
Inferencer[][] infs = new Inferencer[allAlgs.length][models.length];
for (int i = 0; i < allAlgs.length; i++) {
for (int mdl = 0; mdl < models.length; mdl++) {
Inferencer alg = (Inferencer) allAlgs[i].newInstance();
if (alg instanceof TRP) {
((TRP)alg).setRandomSeed (1231234);
}
try {
alg.computeMarginals(models[mdl]);
infs[i][mdl] = alg;
} catch (UnsupportedOperationException e) {
// LoopyBP only handles edge ptls
logger.warning("Skipping (" + mdl + "," + i + ")\n" + e);
throw e;
// continue;
}
}
}
/* Ensure that lookupLogJoint() consistent */
int alg1 = 0; // Brute force
for (int alg2 = 1; alg2 < allAlgs.length; alg2++) {
for (int mdl = 0; mdl < models.length; mdl++) {
Inferencer inf1 = infs[alg1][mdl];
Inferencer inf2 = infs[alg2][mdl];
if ((inf1 == null) || (inf2 == null)) {
continue;
}
Iterator it = models[mdl].assignmentIterator();
while (it.hasNext()) {
try {
Assignment assn = (Assignment) it.next();
double joint1 = inf1.lookupLogJoint(assn);
double joint2 = inf2.lookupLogJoint(assn);
logger.finest("logJoint: " + inf1 + " " + inf2
+ " Model " + mdl
+ " Assn: " + assn
+ " INF1: " + joint1 + "\n"
+ " INF2: " + joint2 + "\n");
assertTrue("logJoint not equal btwn " + GeneralUtils.classShortName (inf1) + " "
+ " and " + GeneralUtils.classShortName (inf2) + "\n"
+ " Model " + mdl + "\n"
+ " INF1: " + joint1 + "\n"
+ " INF2: " + joint2 + "\n",
Math.abs(joint1 - joint2) < 0.2);
double joint3 = inf1.lookupJoint(assn);
assertTrue("logJoint & joint not consistent\n "
+ "Model " + mdl + "\n" + assn,
Maths.almostEquals(joint3, Math.exp(joint1)));
} catch (UnsupportedOperationException e) {
// VarElim doesn't compute log joints. Let it slide
logger.warning("Skipping " + inf1 + " -> " + inf2 + "\n" + e);
continue;
}
}
}
}
}
public void testMarginals() throws Exception
{
Factor[][][] joints = new Factor[models.length][][];
Inferencer[] appxInferencers = constructAllAppxInferencers ();
int numExactAlgs = algorithms.length;
int numAppxAlgs = appxInferencers.length;
int numAlgs = numExactAlgs + numAppxAlgs;
for (int mdl = 0; mdl < models.length; mdl++) {
joints[mdl] = new Factor[numAlgs][];
}
/* Query every known graph with every known alg. */
for (int i = 0; i < algorithms.length; i++) {
for (int mdl = 0; mdl < models.length; mdl++) {
Inferencer alg = (Inferencer) algorithms[i].newInstance();
logger.fine("Computing marginals for model " + mdl + " alg " + alg);
alg.computeMarginals(models[mdl]);
joints[mdl][i] = collectAllMarginals (models [mdl], alg);
}
}
logger.fine("Checking that results are consistent...");
/* Now, make sure the exact marginals are consistent for
* the same model. */
for (int mdl = 0; mdl < models.length; mdl++) {
int maxV = models[mdl].numVariables ();
for (int vrt = 0; vrt < maxV; vrt++) {
for (int alg1 = 0; alg1 < algorithms.length; alg1++) {
for (int alg2 = 0; alg2 < algorithms.length; alg2++) {
Factor joint1 = joints[mdl][alg1][vrt];
Factor joint2 = joints[mdl][alg2][vrt];
try {
// By the time we get here, a joint is null only if
// there was an UnsupportedOperationException.
if ((joint1 != null) && (joint2 != null)) {
assertTrue(joint1.almostEquals(joint2));
}
} catch (AssertionFailedError e) {
System.out.println("\n************************************\nTest FAILED\n\n");
System.out.println("Model " + mdl + " Vertex " + vrt);
System.out.println("Algs " + alg1 + " and " + alg2 + " not consistent.");
System.out.println("MARGINAL from " + alg1);
System.out.println(joint1);
System.out.println("MARGINAL from " + alg2);
System.out.println(joint2);
System.out.println("Marginals from " + alg1 + ":");
for (int i = 0; i < maxV; i++) {
System.out.println(joints[mdl][alg1][i]);
}
System.out.println("Marginals from " + alg2 + ":");
for (int i = 0; i < maxV; i++) {
System.out.println(joints[mdl][alg2][i]);
}
models[mdl].dump ();
throw e;
}
}
}
}
}
// Compare all approximate algorithms against brute force.
logger.fine("Checking the approximate algorithms...");
int alg2 = 0; // Brute force
for (int appxIdx = 0; appxIdx < appxInferencers.length; appxIdx++) {
Inferencer alg = appxInferencers [appxIdx];
for (int mdl = 0; mdl < models.length; mdl++) {
logger.finer("Running inference alg " + alg + " with model " + mdl);
try {
alg.computeMarginals(models[mdl]);
} catch (UnsupportedOperationException e) {
// LoopyBP does not support vertex potentials.
// We'll let that slide.
if (alg instanceof AbstractBeliefPropagation) {
logger.warning("Skipping model " + mdl + " for alg " + alg
+ "\nInference unsupported.");
continue;
} else {
throw e;
}
}
/* lookup all marginals */
int vrt = 0;
int alg1 = numExactAlgs + appxIdx;
int maxV = models[mdl].numVariables ();
joints[mdl][alg1] = new Factor[maxV];
for (Iterator it = models[mdl].variablesSet ().iterator();
it.hasNext();
vrt++) {
Variable var = (Variable) it.next();
logger.finer("Lookup marginal for model " + mdl + " vrt " + var + " alg " + alg);
Factor ptl = alg.lookupMarginal(var);
joints[mdl][alg1][vrt] = ptl.duplicate();
}
for (vrt = 0; vrt < maxV; vrt++) {
Factor joint1 = joints[mdl][alg1][vrt];
Factor joint2 = joints[mdl][alg2][vrt];
try {
assertTrue(joint1.almostEquals(joint2, APPX_EPSILON));
} catch (AssertionFailedError e) {
System.out.println("\n************************************\nAppx Marginal Test FAILED\n\n");
System.out.println("Inferencer: " + alg);
System.out.println("Model " + mdl + " Vertex " + vrt);
System.out.println(joint1.dumpToString ());
System.out.println(joint2.dumpToString ());
models[mdl].dump ();
System.out.println("All marginals:");
for (int i = 0; i < maxV; i++) {
System.out.println(joints[mdl][alg1][i].dumpToString ());
}
System.out.println("Correct marginals:");
for (int i = 0; i < maxV; i++) {
System.out.println(joints[mdl][alg2][i].dumpToString ());
}
throw e;
}
}
}
}
System.out.println("Tested " + models.length + " undirected models.");
}
private Inferencer[] constructAllAppxInferencers () throws IllegalAccessException, InstantiationException
{
List algs = new ArrayList (appxAlgs.length * 2);
for (int i = 0; i < appxAlgs.length; i++) {
algs.add (appxAlgs[i].newInstance ());
}
// Add a few that don't fit
algs.add (new TRP ().setMessager (new AbstractBeliefPropagation.SumProductMessageStrategy (0.8)));
algs.add (new LoopyBP ().setMessager (new AbstractBeliefPropagation.SumProductMessageStrategy (0.8)));
algs.add (new SamplingInferencer (new GibbsSampler (10000), 10000));
algs.add (new SamplingInferencer (new ExactSampler (), 1000));
return (Inferencer[]) algs.toArray (new Inferencer [algs.size ()]);
}
private Inferencer[] constructMaxProductInferencers () throws IllegalAccessException, InstantiationException
{
List algs = new ArrayList ();
algs.add (JunctionTreeInferencer.createForMaxProduct ());
algs.add (TRP.createForMaxProduct ());
algs.add (LoopyBP.createForMaxProduct ());
return (Inferencer[]) algs.toArray (new Inferencer [algs.size ()]);
}
private Factor[] collectAllMarginals (FactorGraph mdl, Inferencer alg)
{
int vrt = 0;
int numVertices = mdl.numVariables ();
Factor[] collector = new Factor[numVertices];
for (Iterator it = mdl.variablesSet ().iterator();
it.hasNext();
vrt++) {
Variable var = (Variable) it.next();
try {
collector[vrt] = alg.lookupMarginal(var);
assert collector [vrt] != null
: "Query returned null for model " + mdl + " vertex " + var + " alg " + alg;
} catch (UnsupportedOperationException e) {
// Allow unsupported inference to slide with warning
logger.warning("Warning: Skipping model " + mdl + " for alg " + alg
+ "\n Inference unsupported.");
}
}
return collector;
}
public void testQuery () throws Exception
{
java.util.Random rand = new java.util.Random (15667);
for (int mdlIdx = 0; mdlIdx < models.length; mdlIdx++) {
FactorGraph mdl = models [mdlIdx];
int size = rand.nextInt (3) + 2;
size = Math.min (size, mdl.varSet ().size ());
Collection vars = CollectionUtils.subset (mdl.variablesSet (), size, rand);
Variable[] varArr = (Variable[]) vars.toArray (new Variable [0]);
Assignment assn = new Assignment (varArr, new int [size]);
BruteForceInferencer brute = new BruteForceInferencer();
Factor joint = brute.joint(mdl);
double marginal = joint.marginalize(vars).value (assn);
for (int algIdx = 0; algIdx < appxAlgs.length; algIdx++) {
Inferencer alg = (Inferencer) appxAlgs[algIdx].newInstance();
if (alg instanceof TRP) continue; // trp can't handle disconnected models, which arise during query()
double returned = alg.query (mdl, assn);
assertEquals ("Failure on model "+mdlIdx+" alg "+alg, marginal, returned, APPX_EPSILON);
}
}
logger.info ("Test testQuery passed.");
}
// be careful that caching of inference algorithms does not affect results here.
public void testSerializable () throws Exception
{
for (int i = 0; i < algorithms.length; i++) {
Inferencer alg = (Inferencer) algorithms[i].newInstance();
testSerializationForAlg (alg);
}
for (int i = 0; i < appxAlgs.length; i++) {
Inferencer alg = (Inferencer) appxAlgs[i].newInstance();
testSerializationForAlg (alg);
}
Inferencer[] maxAlgs = constructMaxProductInferencers ();
for (int i = 0; i < maxAlgs.length; i++) {
testSerializationForAlg (maxAlgs [i]);
}
}
private void testSerializationForAlg (Inferencer alg)
throws IOException, ClassNotFoundException
{
for (int mdlIdx = 0; mdlIdx < models.length; mdlIdx++) {
FactorGraph mdl = models [mdlIdx];
// Copy the inferencer before calling b/c of random seed issues.
Inferencer alg2 = (Inferencer) TestSerializable.cloneViaSerialization (alg);
alg.computeMarginals(mdl);
Factor[] pre = collectAllMarginals (mdl, alg);
alg2.computeMarginals (mdl);
Factor[] post2 = collectAllMarginals (mdl, alg2);
compareMarginals ("Error comparing marginals after serialzation on model "+mdl,
pre, post2);
}
}
private void compareMarginals (String msg, Factor[] pre, Factor[] post)
{
for (int i = 0; i < pre.length; i++) {
Factor ptl1 = pre[i];
Factor ptl2 = post[i];
assertTrue (msg + "\n" + ptl1.dumpToString () + "\n" + ptl2.dumpToString (),
ptl1.almostEquals (ptl2, 1e-3));
}
}
// This is really impossible after the change to the factor graph representation
// Tests the measurement of numbers of messages sent
public void ignoreTestNumMessages ()
{
for (int mdlIdx = 0; mdlIdx < models.length; mdlIdx++) {
UndirectedModel mdl = models [mdlIdx];
TRP trp = new TRP ();
trp.computeMarginals (mdl);
int expectedMessages = (mdl.numVariables () - 1) * 2
* trp.iterationsUsed();
assertEquals (expectedMessages, trp.getTotalMessagesSent ());
LoopyBP loopy = new LoopyBP ();
loopy.computeMarginals (mdl);
expectedMessages = mdl.getEdgeSet().size() * 2
* loopy.iterationsUsed();
assertEquals (expectedMessages, loopy.getTotalMessagesSent ());
}
}
private UndirectedModel createJtChain()
{
int numNodes = 4;
Variable[] nodes = new Variable[numNodes];
for (int i = 0; i < numNodes; i++) {
nodes[i] = new Variable(2);
}
Factor[] pots = new TableFactor[]{
new TableFactor (new Variable[]{nodes[0], nodes[1]},
new double[]{1, 2, 5, 4}),
new TableFactor (new Variable[]{nodes[1], nodes[2]},
new double[]{4, 2, 4, 1}),
new TableFactor (new Variable[]{nodes[2], nodes[3]},
new double[]{7, 3, 6, 9}),
};
for (int i = 0; i < pots.length; i++) {
pots[i].normalize();
}
UndirectedModel uGraph = new UndirectedModel();
for (int i = 0; i < numNodes - 1; i++) {
uGraph.addFactor (pots[i]);
}
return uGraph;
}
private static final int JT_CHAIN_TEST_TREE = 2;
private void createTestTrees()
{
Random r = new Random(185);
trees = new FactorGraph[] {
RandomGraphs.createUniformChain (2),
RandomGraphs.createUniformChain (4),
createJtChain(),
createRandomGrid(5, 1, 3, r),
createRandomGrid(6, 1, 2, r),
createRandomTree(10, 2, r),
createRandomTree(10, 2, r),
createRandomTree(8, 3, r),
createRandomTree(8, 3, r),
};
modelsList.addAll(Arrays.asList(trees));
}
private void computeTestTreeMargs()
{
treeMargs = new Factor[trees.length][];
BruteForceInferencer brute = new BruteForceInferencer();
for (int i = 0; i < trees.length; i++) {
FactorGraph mdl = trees[i];
Factor joint = brute.joint(mdl);
treeMargs[i] = new Factor[mdl.numVariables ()];
for (Iterator it = mdl.variablesIterator (); it.hasNext();) {
Variable var = (Variable) it.next();
treeMargs[i][mdl.getIndex(var)] = joint.marginalize(var);
}
}
}
public void testJtConsistency() {
for (int mdlIdx = 0; mdlIdx < models.length; mdlIdx++) {
UndirectedModel mdl = models[mdlIdx];
JunctionTreeInferencer jti = new JunctionTreeInferencer();
JunctionTree jt = jti.buildJunctionTree(mdl);
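      // Each sepset potential should be defined over exactly the variables shared by
      // the two cliques it separates; the nested loops below verify this.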
for (Iterator it = jt.getVerticesIterator(); it.hasNext();) {
VarSet parent = (VarSet) it.next();
for (Iterator it2 = jt.getChildren(parent).iterator(); it2.hasNext();) {
VarSet child = (VarSet) it2.next();
Factor ptl = jt.getSepsetPot(parent, child);
Set intersection = parent.intersection (child);
assertTrue (intersection.equals (ptl.varSet()));
}
}
}
}
private void compareTrpJoint(Factor joint, TRP trp)
{
Assignment assn = null;
double prob1 = 0.0, prob2 = 0.0;
try {
VarSet all = new HashVarSet (joint.varSet());
for (Iterator it = all.assignmentIterator(); it.hasNext();) {
assn = (Assignment) it.next();
prob1 = trp.lookupJoint(assn);
prob2 = joint.value (assn);
// assertTrue (Maths.almostEquals (prob1, prob2));
assertTrue(Math.abs(prob1 - prob2) < 0.01);
}
} catch (AssertionFailedError e) {
System.out.println("*****************************************\nTEST FAILURE in compareTrpJoint");
System.out.println("*****************************************\nat");
System.out.println(assn);
System.out.println("Expected: " + prob2);
System.out.println("TRP: " + prob1);
System.out.println("*****************************************\nExpected joint");
System.out.println(joint);
System.out.println("*****************************************\nTRP dump");
trp.dump();
throw e;
}
}
public void testTrp()
{
final UndirectedModel model = createTriangle();
TRP trp = new TRP().setTerminator (new TRP.IterationTerminator(200));
BruteForceInferencer brute = new BruteForceInferencer();
Factor joint = brute.joint(model);
trp.computeMarginals(model);
// Check joint
// DiscretePotential joint = brute.joint (model);
compareTrpJoint(joint, trp);
// Check all marginals
try {
for (Iterator it = model.variablesIterator (); it.hasNext();) {
Variable var = (Variable) it.next();
Factor marg1 = trp.lookupMarginal(var);
Factor marg2 = joint.marginalize (var);
assertTrue(marg1.almostEquals(marg2, APPX_EPSILON));
}
for (Iterator it = model.factorsIterator(); it.hasNext();) {
Factor factor = (Factor) it.next ();
Factor marg1 = trp.lookupMarginal (factor.varSet ());
Factor marg2 = joint.marginalize (factor.varSet ());
assertTrue(marg1.almostEquals(marg2, APPX_EPSILON));
}
} catch (AssertionFailedError e) {
System.out.println("\n*************************************\nTEST FAILURE in compareTrpMargs");
// System.out.println(marg1);
// System.out.println(marg2);
System.out.println("*************************************\nComplete model:\n\n");
model.dump ();
System.out.println("*************************************\nTRP margs:\n\n");
trp.dump();
System.out.println("**************************************\nAll correct margs:\n");
for (Iterator it2 = model.variablesIterator (); it2.hasNext();) {
Variable v2 = (Variable) it2.next();
brute.computeMarginals (model);
System.out.println(brute.lookupMarginal(v2));
}
throw e;
}
}
public void testTrpJoint()
{
FactorGraph model = createTriangle();
TRP trp = new TRP().setTerminator (new TRP.IterationTerminator(25));
trp.computeMarginals(model);
// For each assignment to the model, check that
// TRP.lookupLogJoint and TRP.lookupJoint are consistent
VarSet all = new HashVarSet (model.variablesSet ());
for (Iterator it = all.assignmentIterator(); it.hasNext();) {
Assignment assn = (Assignment) it.next();
double log = trp.lookupLogJoint(assn);
double prob = trp.lookupJoint(assn);
assertTrue(Maths.almostEquals(Math.exp(log), prob));
}
logger.info("Test trpJoint passed.");
}
  /** Tests that running TRP doesn't inadvertently change potentials
in the original graph. */
public void testTrpNonDestructivity()
{
FactorGraph model = createTriangle();
TRP trp = new TRP(new TRP.IterationTerminator(25));
BruteForceInferencer brute = new BruteForceInferencer();
Factor joint1 = brute.joint(model);
trp.computeMarginals(model);
Factor joint2 = brute.joint(model);
assertTrue(joint1.almostEquals(joint2));
logger.info("Test trpNonDestructivity passed.");
}
public void testTrpReuse()
{
TRP trp1 = new TRP(new TRP.IterationTerminator(25));
for (int i = 0; i < models.length; i++) {
trp1.computeMarginals(models[i]);
}
// Hard to do automatically right now...
logger.info("Please ensure that all instantiations above run for 25 iterations.");
// Ensure that all edges touched works...
UndirectedModel mdl = models[0];
final Tree tree = trp1.new AlmostRandomTreeFactory().nextTree(mdl);
TRP trp2 = new TRP(new TRP.TreeFactory() {
public Tree nextTree(FactorGraph mdl)
{
return tree;
}
});
trp2.computeMarginals(mdl);
logger.info("Ensure that the above instantiation ran for 1000 iterations with a warning.");
}
private static String[] treeStrs = new String[] {
"<TREE>" +
" <VAR NAME='V0'>" +
" <FACTOR VARS='V0 V1'>" +
" <VAR NAME='V1'/>" +
" </FACTOR>" +
" <FACTOR VARS='V0 V2'>" +
" <VAR NAME='V2'/>" +
" </FACTOR>" +
" </VAR>"+
"</TREE>",
"<TREE>" +
" <VAR NAME='V1'>" +
" <FACTOR VARS='V0 V1'>" +
" <VAR NAME='V0'/>" +
" </FACTOR>" +
" <FACTOR VARS='V1 V2'>" +
" <VAR NAME='V2'/>" +
" </FACTOR>" +
" </VAR>"+
"</TREE>",
"<TREE>" +
" <VAR NAME='V0'>" +
" <FACTOR VARS='V0 V1'>" +
" <VAR NAME='V1'>" +
" <FACTOR VARS='V1 V2'>" +
" <VAR NAME='V2'/>" +
" </FACTOR>" +
"</VAR>"+
" </FACTOR>" +
" </VAR>" +
"</TREE>",
"<TREE>" +
" <VAR NAME='V2'>" +
" <FACTOR VARS='V2 V1'>" +
" <VAR NAME='V1'/>" +
" </FACTOR>" +
" <FACTOR VARS='V0 V2'>" +
" <VAR NAME='V0'/>" +
" </FACTOR>" +
" </VAR>"+
"</TREE>",
};
public void testTrpTreeList ()
{
FactorGraph model = createTriangle();
model.getVariable (0).setLabel ("V0");
model.getVariable (1).setLabel ("V1");
model.getVariable (2).setLabel ("V2");
List readers = new ArrayList ();
for (int i = 0; i < treeStrs.length; i++) {
readers.add (new StringReader (treeStrs[i]));
}
TRP trp = new TRP().setTerminator (new TRP.DefaultConvergenceTerminator())
.setFactory (TRP.TreeListFactory.makeFromReaders (model, readers));
trp.computeMarginals(model);
Inferencer jt = new BruteForceInferencer ();
jt.computeMarginals (model);
compareMarginals ("", model, trp, jt);
}
// Verify that variable indices are consistent in undirectected
// models.
public void testUndirectedIndices()
{
for (int mdlIdx = 0; mdlIdx < models.length; mdlIdx++) {
FactorGraph mdl = models[mdlIdx];
for (Iterator it = mdl.variablesIterator (); it.hasNext();) {
Variable var1 = (Variable) it.next();
Variable var2 = mdl.get(mdl.getIndex(var1));
assertTrue("Mismatch in Variable index for " + var1 + " vs "
+ var2 + " in model " + mdlIdx + "\n" + mdl,
var1 == var2);
}
}
logger.info("Test undirectedIndices passed.");
}
// Tests that TRP and max-product propagation return the same
// results when TRP runs for exactly one iteration.
public void testTrpViterbiEquiv()
{
for (int mdlIdx = 0; mdlIdx < trees.length; mdlIdx++) {
FactorGraph mdl = trees[mdlIdx];
TreeBP maxprod = TreeBP.createForMaxProduct ();
TRP trp = TRP.createForMaxProduct ()
.setTerminator (new TRP.IterationTerminator (1));
maxprod.computeMarginals (mdl);
trp.computeMarginals (mdl);
// TRP should return same results as viterbi
for (Iterator it = mdl.variablesIterator (); it.hasNext ();) {
Variable var = (Variable) it.next ();
Factor maxPotBp = maxprod.lookupMarginal (var);
Factor maxPotTrp = trp.lookupMarginal (var);
maxPotBp.normalize ();
maxPotTrp.normalize ();
assertTrue ("TRP 1 iter maxprod propagation not the same as plain maxProd!\n" +
"Trp " + maxPotTrp.dumpToString () + "\n Plain maxprod " + maxPotBp.dumpToString (),
maxPotBp.almostEquals (maxPotTrp));
}
}
}
public void testTrpOnTrees ()
{
for (int mdlIdx = 0; mdlIdx < trees.length; mdlIdx++) {
FactorGraph mdl = trees[mdlIdx];
Inferencer bp = new TreeBP ();
Inferencer trp = new TRP ().setTerminator (new TRP.IterationTerminator (1));
bp.computeMarginals (mdl);
trp.computeMarginals (mdl);
int[] outcomes = new int [mdl.numVariables ()];
Assignment assn = new Assignment (mdl, outcomes);
assertEquals (bp.lookupLogJoint (assn), trp.lookupLogJoint (assn), 1e-5);
Arrays.fill (outcomes, 1);
assn = new Assignment (mdl, outcomes);
assertEquals (bp.lookupLogJoint (assn), trp.lookupLogJoint (assn), 1e-5);
// TRP should return same results as viterbi
for (Iterator it = mdl.variablesIterator (); it.hasNext ();) {
Variable var = (Variable) it.next ();
Factor maxPotBp = bp.lookupMarginal (var);
Factor maxPotTrp = trp.lookupMarginal (var);
maxPotBp.normalize ();
maxPotTrp.normalize ();
assertTrue ("TRP 1 iter bp propagation not the same as plain maxProd!\n" +
"Trp " + maxPotTrp.dumpToString () + "\n Plain bp " + maxPotBp.dumpToString (),
maxPotBp.almostEquals (maxPotTrp));
}
}
}
// Tests that TRP and max-product propagation return the same
// results when TRP is allowed to run to convergence.
public void testTrpViterbiEquiv2()
{
for (int mdlIdx = 0; mdlIdx < trees.length; mdlIdx++) {
FactorGraph mdl = trees[mdlIdx];
Inferencer maxprod = TreeBP.createForMaxProduct ();
TRP trp = TRP.createForMaxProduct ();
maxprod.computeMarginals (mdl);
trp.computeMarginals (mdl);
// TRP should return same results as viterbi
for (Iterator it = mdl.variablesIterator (); it.hasNext ();) {
Variable var = (Variable) it.next ();
Factor maxPotBp = maxprod.lookupMarginal (var);
Factor maxPotTrp = trp.lookupMarginal (var);
assertTrue ("TRP maxprod propagation not the same as plain maxProd!\n" +
"Trp " + maxPotTrp + "\n Plain maxprod " + maxPotBp,
maxPotBp.almostEquals (maxPotTrp));
}
}
}
public void testTreeViterbi()
{
for (int mdlIdx = 0; mdlIdx < trees.length; mdlIdx++) {
FactorGraph mdl = trees[mdlIdx];
BruteForceInferencer brute = new BruteForceInferencer ();
Inferencer maxprod = TreeBP.createForMaxProduct ();
Factor joint = brute.joint (mdl);
maxprod.computeMarginals (mdl);
for (Iterator it = mdl.variablesIterator (); it.hasNext ();) {
Variable var = (Variable) it.next ();
Factor maxPot = maxprod.lookupMarginal (var);
Factor trueMaxPot = joint.extractMax (var);
maxPot.normalize ();
trueMaxPot.normalize ();
assertTrue ("Maximization failed! Normalized returns:\n" + maxPot
+ "\nTrue: " + trueMaxPot,
maxPot.almostEquals (trueMaxPot));
}
}
logger.info("Test treeViterbi passed: " + trees.length + " models.");
}
public void testJtViterbi()
{
JunctionTreeInferencer jti = new JunctionTreeInferencer();
for (int mdlIdx = 0; mdlIdx < models.length; mdlIdx++) {
UndirectedModel mdl = models[mdlIdx];
BruteForceInferencer brute = new BruteForceInferencer ();
JunctionTreeInferencer maxprod = JunctionTreeInferencer.createForMaxProduct ();
JunctionTree jt = maxprod.buildJunctionTree (mdl);
Factor joint = brute.joint (mdl);
maxprod.computeMarginals (jt);
for (Iterator it = mdl.variablesIterator (); it.hasNext ();) {
Variable var = (Variable) it.next ();
Factor maxPotRaw = maxprod.lookupMarginal (var);
Factor trueMaxPotRaw = joint.extractMax (var);
Factor maxPot = maxPotRaw.duplicate().normalize ();
Factor trueMaxPot = trueMaxPotRaw.duplicate().normalize ();
assertTrue ("Maximization failed on model " + mdlIdx
+ " ! Normalized returns:\n" + maxPot.dumpToString ()
+ "\nTrue: " + trueMaxPot.dumpToString (),
maxPot.almostEquals (trueMaxPot, 0.01));
}
}
logger.info("Test jtViterbi passed.");
}
/*
public void testMM() throws Exception
{
testQuery();
testTreeViterbi();
testTrpViterbiEquiv();
testTrpViterbiEquiv2();
testMaxMarginals();
}
*/
// xxx fails because of TRP termination
// i.e., always succeeds if termination is IterationTermination (10)
// but usually fails if termination is DefaultConvergenceTerminator (1e-12, 1000)
// something about selection of random spanning trees???
public void testMaxMarginals() throws Exception
{
for (int mdlIdx = 0; mdlIdx < models.length; mdlIdx++) {
// { int mdlIdx = 4;
FactorGraph mdl = models[mdlIdx];
// if (mdlIdx != 3) {
// Visualizer.showModel(mdl);
// mdl.dump(); System.out.println ("***END MDL "+mdlIdx+"***");
// }
BruteForceInferencer brute = new BruteForceInferencer();
Factor joint = brute.joint(mdl);
// long foo = System.currentTimeMillis ();
// System.out.println(foo);
Inferencer[] algs = constructMaxProductInferencers ();
for (int infIdx = 0; infIdx < algs.length; infIdx++) {
Inferencer inf = algs[infIdx];
if (inf instanceof TRP)
((TRP)inf).setRandomSeed(42);
inf.computeMarginals(mdl);
for (Iterator it = mdl.variablesIterator (); it.hasNext();) {
Variable var = (Variable) it.next();
Factor maxPot = inf.lookupMarginal(var);
Factor trueMaxPot = joint.extractMax(var);
if (maxPot.argmax() != trueMaxPot.argmax()) {
logger.warning("Argmax not equal on model " + mdlIdx + " inferencer "
+ inf + " !\n Factors:\nReturned: " + maxPot +
"\nTrue: " + trueMaxPot);
System.err.println("Dump of model " + mdlIdx + " ***");
mdl.dump ();
assertTrue (maxPot.argmax() == trueMaxPot.argmax());
}
}
}
}
logger.info("Test maxMarginals passed.");
}
public void testBeliefPropagation()
{
for (int mdlIdx = 0; mdlIdx < trees.length; mdlIdx++) {
FactorGraph mdl = trees[mdlIdx];
Inferencer prop = new TreeBP ();
// System.out.println(mdl);
prop.computeMarginals(mdl);
for (Iterator it = mdl.variablesIterator (); it.hasNext();) {
Variable var = (Variable) it.next();
Factor marg1 = treeMargs[mdlIdx][mdl.getIndex(var)];
Factor marg2 = prop.lookupMarginal(var);
try {
assertTrue("Test failed on graph " + mdlIdx + " vertex " + var + "\n" +
"Model: " + mdl + "\nExpected: " + marg1.dumpToString () + "\nActual: " + marg2.dumpToString (),
marg1.almostEquals(marg2, 0.011));
} catch (AssertionFailedError e) {
System.out.println (e.getMessage ());
System.out.println("*******************************************\nMODEL:\n");
mdl.dump ();
System.out.println("*******************************************\nMESSAGES:\n");
((AbstractBeliefPropagation)prop).dump();
throw e;
}
}
}
logger.info("Test beliefPropagation passed.");
}
public void testBpJoint ()
{
for (int mdlIdx = 0; mdlIdx < trees.length; mdlIdx++) {
FactorGraph mdl = trees[mdlIdx];
Inferencer bp = new TreeBP ();
BruteForceInferencer brute = new BruteForceInferencer ();
brute.computeMarginals (mdl);
bp.computeMarginals (mdl);
for (AssignmentIterator it = mdl.assignmentIterator (); it.hasNext();) {
Assignment assn = (Assignment) it.next ();
assertEquals (brute.lookupJoint (assn), bp.lookupJoint (assn), 1e-15);
}
}
}
  // Eventually this should be folded into testMarginals, testJoint, etc.
public void testDirectedJt ()
{
DirectedModel bn = createDirectedModel ();
BruteForceInferencer brute = new BruteForceInferencer ();
brute.computeMarginals (bn);
JunctionTreeInferencer jt = new JunctionTreeInferencer ();
jt.computeMarginals (bn);
compareMarginals ("Error comparing junction tree to brute on directed model!",
bn, brute, jt);
}
private DirectedModel createDirectedModel ()
{
int NUM_OUTCOMES = 2;
cc.mallet.util.Randoms random = new cc.mallet.util.Randoms (13413);
Dirichlet dirichlet = new Dirichlet (NUM_OUTCOMES, 1.0);
double[] pA = dirichlet.randomVector (random);
double[] pB = dirichlet.randomVector (random);
TDoubleArrayList pC = new TDoubleArrayList (NUM_OUTCOMES * NUM_OUTCOMES * NUM_OUTCOMES);
for (int i = 0; i < (NUM_OUTCOMES * NUM_OUTCOMES); i++) {
pC.add (dirichlet.randomVector (random));
}
Variable[] vars = new Variable[] { new Variable (NUM_OUTCOMES), new Variable (NUM_OUTCOMES),
new Variable (NUM_OUTCOMES) };
DirectedModel mdl = new DirectedModel ();
mdl.addFactor (new CPT (new TableFactor (vars[0], pA), vars[0]));
mdl.addFactor (new CPT (new TableFactor (vars[1], pB), vars[1]));
mdl.addFactor (new CPT (new TableFactor (vars, pC.toNativeArray ()), vars[2]));
return mdl;
}
private void compareMarginals (String msg, FactorGraph fg, Inferencer inf1, Inferencer inf2)
{
for (int i = 0; i < fg.numVariables (); i++) {
Variable var = fg.get (i);
Factor ptl1 = inf1.lookupMarginal (var);
Factor ptl2 = inf2.lookupMarginal (var);
assertTrue (msg + "\n" + ptl1.dumpToString () + "\n" + ptl2.dumpToString (),
ptl1.almostEquals (ptl2, 1e-5));
}
}
protected void setUp()
{
modelsList = createTestModels();
createTestTrees();
models = (UndirectedModel[]) modelsList.toArray
(new UndirectedModel[]{});
computeTestTreeMargs();
}
public void testMultiply()
{
TableFactor p1 = new TableFactor (new Variable[]{});
System.out.println(p1);
Variable[] vars = new Variable[]{
new Variable(2),
new Variable(2),
};
double[] probs = new double[]{1, 3, 5, 6};
TableFactor p2 = new TableFactor
(vars, probs);
Factor p3 = p1.multiply(p2);
assertTrue("Should be equal: " + p2 + "\n" + p3,
p2.almostEquals(p3));
}
/* TODO: Not sure how to test this anymore.
// Test multiplication of potentials where variables are in
// a different order
public void testMultiplication2 ()
{
Variable[] vars = new Variable[] {
new Variable (2),
new Variable (2),
};
double[] probs1 = new double[] { 2, 4, 1, 6 };
double[] probs2a = new double[] { 3, 7, 6, 5 };
double[] probs2b = new double[] { 3, 6, 7, 5 };
MultinomialPotential ptl1a = new MultinomialPotential (vars, probs1);
MultinomialPotential ptl1b = new MultinomialPotential (vars, probs1);
MultinomialPotential ptl2a = new MultinomialPotential (vars, probs2a);
Variable[] vars2 = new Variable[] { vars[1], vars[0], };
MultinomialPotential ptl2b = new MultinomialPotential (vars2, probs2b);
ptl1a.multiplyBy (ptl2a);
ptl1b.multiplyBy (ptl2b);
assertTrue (ptl1a.almostEquals (ptl1b));
}
*/
public void testLogMarginalize ()
{
FactorGraph mdl = models [0];
Iterator it = mdl.variablesIterator ();
Variable v1 = (Variable) it.next();
Variable v2 = (Variable) it.next();
Random rand = new Random (3214123);
for (int i = 0; i < 10; i++) {
Factor ptl = randomEdgePotential (rand, v1, v2);
Factor logmarg1 = new LogTableFactor ((AbstractTableFactor) ptl).marginalize(v1);
Factor marglog1 = new LogTableFactor((AbstractTableFactor) ptl.marginalize(v1));
assertTrue ("LogMarg failed! Correct: "+marglog1+" Log-marg: "+logmarg1,
logmarg1.almostEquals (marglog1));
Factor logmarg2 = new LogTableFactor ((AbstractTableFactor) ptl).marginalize(v2);
Factor marglog2 = new LogTableFactor((AbstractTableFactor) ptl.marginalize(v2));
assertTrue (logmarg2.almostEquals (marglog2));
}
}
public void testLogNormalize ()
{
FactorGraph mdl = models [0];
Iterator it = mdl.variablesIterator ();
Variable v1 = (Variable) it.next();
Variable v2 = (Variable) it.next();
Random rand = new Random (3214123);
for (int i = 0; i < 10; i++) {
Factor ptl = randomEdgePotential (rand, v1, v2);
Factor norm1 = new LogTableFactor((AbstractTableFactor) ptl);
Factor norm2 = ptl.duplicate();
norm1.normalize();
norm2.normalize();
assertTrue ("LogNormalize failed! Correct: "+norm2+" Log-normed: "+norm1,
norm1.almostEquals (norm2));
}
}
public void testSumLogProb ()
{
java.util.Random rand = new java.util.Random (3214123);
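    // Maths.sumLogProb (a, b) should compute log (e^a + e^b) without leaving log
    // space; here it is checked against direct summation for random v1, v2 in (0,1).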
for (int i = 0; i < 10; i++) {
double v1 = rand.nextDouble();
double v2 = rand.nextDouble();
double sum1 = Math.log (v1 + v2);
double sum2 = Maths.sumLogProb (Math.log(v1), Math.log (v2));
// System.out.println("Summing "+v1+" + "+v2);
assertEquals (sum1, sum2, 0.00001);
}
}
public void testInfiniteCost()
{
Variable[] vars = new Variable[3];
for (int i = 0; i < vars.length; i++) {
vars[i] = new Variable (2);
}
FactorGraph mdl = new FactorGraph (vars);
mdl.addFactor (vars[0], vars[1], new double[] { 2, 6, 4, 8 });
mdl.addFactor (vars[1], vars[2], new double[] { 1, 0, 0, 1 });
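    // The { 1, 0, 0, 1 } factor has zero entries, which correspond to infinite cost
    // (negative infinity in log space); the test checks that BP runs on such a model
    // without error.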
mdl.dump ();
Inferencer bp = new TreeBP ();
bp.computeMarginals (mdl);
//below should be true, except potentials have different ranges.
//assertTrue (bp.lookupMarginal(vars[1]).almostEquals (bp.lookupMarginal(vars[2])));
}
public void testJtCaching()
{
// clear all caches
for (int i = 0; i < models.length; i++) {
FactorGraph model = models[i];
model.setInferenceCache (JunctionTreeInferencer.class, null);
}
Factor[][] margs = new Factor[models.length][];
long stime1 = new Date().getTime();
for (int i = 0; i < models.length; i++) {
FactorGraph model = models[i];
JunctionTreeInferencer inf = new JunctionTreeInferencer();
inf.computeMarginals(model);
margs[i] = new Factor[model.numVariables ()];
Iterator it = model.variablesIterator ();
int j = -1;
while (it.hasNext()) {
Variable var = (Variable) it.next();
j++;
margs[i][j] = inf.lookupMarginal(var);
}
}
long etime1 = new Date().getTime();
long diff1 = etime1 - stime1;
logger.info ("Pre-cache took "+diff1+" ms.");
long stime2 = new Date().getTime();
for (int i = 0; i < models.length; i++) {
FactorGraph model = models[i];
JunctionTreeInferencer inf = new JunctionTreeInferencer();
inf.computeMarginals(model);
Iterator it = model.variablesIterator ();
int j = -1;
while (it.hasNext()) {
Variable var = (Variable) it.next();
j++;
assertTrue (margs[i][j].almostEquals (inf.lookupMarginal (var)));
}
}
long etime2 = new Date().getTime();
long diff2 = etime2 - stime2;
logger.info ("Post-cache took "+diff2+" ms.");
// assertTrue (diff2 < diff1);
}
public void testFindVariable ()
{
FactorGraph mdl = models [0];
Variable[] vars = new Variable [mdl.numVariables ()];
Iterator it = mdl.variablesIterator ();
while (it.hasNext()) {
Variable var = (Variable) it.next();
String name = new String (var.getLabel());
assertTrue (var == mdl.findVariable (name));
}
assertTrue (mdl.findVariable ("xsdfasdf") == null);
}
public void testDefaultLookupMarginal ()
{
Inferencer inf = new TreeBP ();
FactorGraph mdl = trees[JT_CHAIN_TEST_TREE];
Variable var = mdl.get (0);
inf.computeMarginals (mdl);
// Previously: UnsupportedOperationException
    // Expected: default to lookupMarginal (Variable) for a clique of size 1
VarSet varSet = new HashVarSet (new Variable[] { var });
Factor ptl1 = inf.lookupMarginal (varSet);
Factor ptl2 = inf.lookupMarginal (var);
assertTrue (ptl1.almostEquals (ptl2));
Variable var2 = mdl.get (1);
Variable var3 = mdl.get (2);
VarSet c2 = new HashVarSet (new Variable[] { var, var2, var3 });
try {
inf.lookupMarginal (c2);
fail ("Expected an UnsupportedOperationException with clique "+c2);
} catch (UnsupportedOperationException e) {}
}
// Eventually this should be moved to models[], but TRP currently chokes on disconnected
// model
public void testDisconnectedModel ()
{
Variable[] vars = new Variable [4];
for (int i = 0; i < vars.length; i++) {
vars [i] = new Variable (2);
}
FactorGraph mdl = new UndirectedModel (vars);
Random r = new Random (67);
Factor[] ptls = new Factor [4];
Factor[] normed = new Factor [4];
for (int i = 0; i < vars.length; i++) {
ptls[i] = randomNodePotential (r, vars[i]);
normed[i] = ptls[i].duplicate();
normed[i].normalize();
mdl.addFactor (ptls[i]);
}
mdl.dump ();
Inferencer inf = new LoopyBP ();
inf.computeMarginals (mdl);
for (int i = 0; i < vars.length; i++) {
Factor marg = inf.lookupMarginal (vars[i]);
assertTrue ("Marginals not equal!\n True: "+normed[i]+"\n Returned "+marg,
marg.almostEquals (normed[i]));
}
for (AssignmentIterator it = mdl.assignmentIterator (); it.hasNext();) {
Assignment assn = (Assignment) it.next ();
double trueProb = 1.0;
for (int i = 0; i < vars.length; i++) trueProb *= normed[i].value (assn);
assertEquals (trueProb, inf.lookupJoint (assn), 1e-5);
}
}
public void timeMarginalization ()
{
java.util.Random r = new java.util.Random (7732847);
Variable[] vars = new Variable[] { new Variable (2),
new Variable (2),
};
TableFactor ptl = randomEdgePotential (r, vars[0], vars[1]);
long stime = System.currentTimeMillis ();
for (int i = 0; i < 1000; i++) {
Factor marg = ptl.marginalize (vars[0]);
Factor marg2 = ptl.marginalize (vars[1]);
}
long etime = System.currentTimeMillis ();
logger.info ("Marginalization (2-outcome) took "+(etime-stime)+" ms.");
Variable[] vars45 = new Variable[] { new Variable (45),
new Variable (45),
};
TableFactor ptl45 = randomEdgePotential (r, vars45[0], vars45[1]);
stime = System.currentTimeMillis();
for (int i = 0; i < 1000; i++) {
Factor marg = ptl45.marginalize (vars45[0]);
Factor marg2 = ptl45.marginalize (vars45[1]);
}
etime = System.currentTimeMillis();
logger.info ("Marginalization (45-outcome) took "+(etime-stime)+" ms.");
}
// using this for profiling
public void runJunctionTree ()
{
for (int mdlIdx = 0; mdlIdx < models.length; mdlIdx++) {
FactorGraph model = models[mdlIdx];
JunctionTreeInferencer inf = new JunctionTreeInferencer();
inf.computeMarginals(model);
Iterator it = model.variablesIterator ();
while (it.hasNext()) {
Variable var = (Variable) it.next();
inf.lookupMarginal (var);
}
}
}
public void testDestructiveAssignment ()
{
Variable vars[] = { new Variable(2), new Variable (2), };
Assignment assn = new Assignment (vars, new int[] { 0, 1 });
assertEquals (0, assn.get (vars[0]));
assertEquals (1, assn.get (vars[1]));
assn.setValue (vars[0], 1);
assertEquals (1, assn.get (vars[0]));
assertEquals (1, assn.get (vars[1]));
}
public void testLoopyConvergence ()
{
Random r = new Random (67);
FactorGraph mdl = createRandomGrid (5, 5, 2, r);
LoopyBP loopy = new LoopyBP ();
loopy.computeMarginals (mdl);
assertTrue (loopy.iterationsUsed() > 8);
}
public void testSingletonGraph ()
{
Variable v = new Variable (2);
FactorGraph mdl = new FactorGraph (new Variable[] { v });
mdl.addFactor (new TableFactor (v, new double[] { 1, 2 }));
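    // After normalization the single factor { 1, 2 } yields marginals 1/3 and 2/3,
    // which is what the assertions below check.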
TRP trp = new TRP ();
trp.computeMarginals (mdl);
Factor ptl = trp.lookupMarginal (v);
double[] dbl = ((AbstractTableFactor) ptl).toValueArray ();
assertEquals (2, dbl.length);
assertEquals (0.33333, dbl[0], 1e-4);
assertEquals (0.66666, dbl[1], 1e-4);
}
public void testLoopyCaching ()
{
FactorGraph mdl1 = models[4];
FactorGraph mdl2 = models[5];
Variable var = mdl1.get (0);
LoopyBP inferencer = new LoopyBP ();
inferencer.setUseCaching (true);
inferencer.computeMarginals (mdl1);
Factor origPtl = inferencer.lookupMarginal (var);
assertTrue (2 < inferencer.iterationsUsed ());
// confuse the inferencer
inferencer.computeMarginals (mdl2);
// make sure we have cached, correct results
inferencer.computeMarginals (mdl1);
Factor sndPtl = inferencer.lookupMarginal (var);
    // note that we can't use an epsilon here that is smaller than our convergence criterion.
assertTrue ("Huh? Original potential:"+origPtl+"After: "+sndPtl,
origPtl.almostEquals (sndPtl, 1e-4));
assertEquals (1, inferencer.iterationsUsed ());
}
public void testJunctionTreeConnectedFromRoot ()
{
JunctionTreeInferencer jti = new JunctionTreeInferencer ();
jti.computeMarginals (models[0]);
jti.computeMarginals (models[1]);
JunctionTree jt = jti.lookupJunctionTree ();
List reached = new ArrayList ();
LinkedList queue = new LinkedList ();
queue.add (jt.getRoot ());
while (!queue.isEmpty ()) {
VarSet current = (VarSet) queue.removeFirst ();
queue.addAll (jt.getChildren (current));
reached.add (current);
}
assertEquals (jt.clusterPotentials ().size (), reached.size());
}
public void testBpLargeModels ()
{
Timing timing = new Timing ();
// UndirectedModel mdl = RandomGraphs.createUniformChain (800);
FactorGraph mdl = RandomGraphs.createUniformChain (8196);
timing.tick ("Model creation");
AbstractBeliefPropagation inf = new LoopyBP ();
try {
inf.computeMarginals (mdl);
} catch (OutOfMemoryError e) {
System.out.println ("OUT OF MEMORY: Messages sent "+inf.getTotalMessagesSent ());
throw e;
}
timing.tick ("Inference time (Random sched BP)");
}
public void testTrpLargeModels ()
{
Timing timing = new Timing ();
// UndirectedModel mdl = RandomGraphs.createUniformChain (800);
FactorGraph mdl = RandomGraphs.createUniformChain (8192);
timing.tick ("Model creation");
Inferencer inf = new TRP ();
inf.computeMarginals (mdl);
timing.tick ("Inference time (TRP)");
}
/*
public void testBpDualEdgeFactor ()
{
Variable[] vars = new Variable[] {
new Variable (2),
new Variable (2),
new Variable (2),
new Variable (2),
};
Random r = new Random ();
Factor tbl1 = createEdgePtl (vars[0], vars[1], r);
Factor tbl2a = createEdgePtl (vars[1], vars[2], r);
Factor tbl2b = createEdgePtl (vars[1], vars[2], r);
Factor tbl3 = createEdgePtl (vars[2], vars[3], r);
FactorGraph fg = new FactorGraph (vars);
fg.addFactor (tbl1);
fg.addFactor (tbl2a);
fg.addFactor (tbl2b);
fg.addFactor (tbl3);
Inferencer inf = new TRP ();
inf.computeMarginals (fg);
VarSet vs = tbl2a.varSet ();
Factor marg1 = inf.lookupMarginal (vs);
Factor prod = TableFactor.multiplyAll (fg.factors ());
Factor marg2 = prod.marginalize (vs);
marg2.normalize ();
assertTrue ("Factors not equal! BP: "+marg1.dumpToString ()+"\n EXACT: "+marg2.dumpToString (), marg1.almostEquals (marg2));
}
*/
private Factor createEdgePtl (Variable var1, Variable var2, Random r)
{
double[] dbls = new double [4];
for (int i = 0; i < dbls.length; i++) {
dbls[i] = r.nextDouble ();
}
return new TableFactor (new Variable[] { var1, var2 }, dbls);
}
private String gridStr =
"VAR alpha u : CONTINUOUS\n" +
"alpha ~ Uniform -1.0 1.0\n" +
"u ~ Uniform -2.0 2.0\n" +
"x00 ~ Unary u\n" +
"x10 ~ Unary u\n" +
"x01 ~ Unary u\n" +
"x11 ~ Unary u\n" +
"x00 x01 ~ Potts alpha\n" +
"x00 x10 ~ Potts alpha\n" +
"x01 x11 ~ Potts alpha\n" +
"x10 x11 ~ Potts alpha\n";
public void testJtConstant () throws IOException
{
FactorGraph masterFg = new ModelReader ().readModel (new BufferedReader (new StringReader (gridStr)));
JunctionTreeInferencer jt = new JunctionTreeInferencer ();
Assignment assn = masterFg.sampleContinuousVars (new cc.mallet.util.Randoms (3214));
FactorGraph fg = (FactorGraph) masterFg.slice (assn);
jt.computeMarginals (fg);
}
public static Test suite()
{
return new TestSuite(TestInference.class);
}
public static void main(String[] args) throws Exception
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite();
for (int i = 0; i < args.length; i++) {
theSuite.addTest(new TestInference(args[i]));
}
} else {
theSuite = (TestSuite) suite();
}
junit.textui.TestRunner.run(theSuite);
}
}
| 59,108 | 32.207303 | 129 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/test/TestGibbsSampler.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.test;
import cc.mallet.grmm.inference.GibbsSampler;
import cc.mallet.grmm.types.Factor;
import cc.mallet.grmm.types.FactorGraph;
import cc.mallet.grmm.types.TableFactor;
import cc.mallet.grmm.types.Variable;
import cc.mallet.util.Randoms;
import junit.framework.*;
/**
* $Id: TestGibbsSampler.java,v 1.1 2007/10/22 21:37:41 mccallum Exp $
*/
public class TestGibbsSampler extends TestCase {
public TestGibbsSampler (String name)
{
super (name);
}
// Tests finding a feasible initial assignment in a sparse model
public void testInitialAssignment ()
{
Variable[] vars = new Variable[] { new Variable (3), new Variable (3), new Variable (3) };
Variable[] vars1 = new Variable[]{ vars[0], vars[1] };
double[] vals1 = new double[] { 0, 0.2, 0.8, 0, 0.7, 0.3, 0, 0.5, 0.5 };
Factor tbl1 = new TableFactor (vars1, vals1);
Variable[] vars2 = new Variable[]{ vars[1], vars[2] };
double[] vals2 = new double[] { 0.2, 0.2, 0.8, 0.7, 0, 0.7, 0.3, 0, 0.5 };
Factor tbl2 = new TableFactor (vars2, vals2);
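    // Editorial note: the zero entries in vals1 and vals2 above are what make this model
    // "sparse": some joint assignments have zero probability, so the sampler presumably
    // has to search for a feasible initial assignment rather than accept an arbitrary one.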
FactorGraph fg = new FactorGraph ();
fg.multiplyBy (tbl1);
fg.multiplyBy (tbl2);
System.out.println (fg.dumpToString ());
GibbsSampler gs = new GibbsSampler (new Randoms (324123), 10);
gs.sample (fg, 10); // assert no exception
}
/**
* @return a <code>TestSuite</code>
*/
public static TestSuite suite ()
{
return new TestSuite (TestGibbsSampler.class);
}
public static void main (String[] args)
{
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite ();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestGibbsSampler (args[i]));
}
} else {
theSuite = (TestSuite) suite ();
}
junit.textui.TestRunner.run (theSuite);
}
}
| 2,246 | 28.96 | 94 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/FactorGraph.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import gnu.trove.THashMap;
import gnu.trove.THashSet;
import gnu.trove.TObjectObjectProcedure;
import gnu.trove.TIntIntHashMap;
import java.io.*;
import java.util.*;
import cc.mallet.grmm.inference.ExactSampler;
import cc.mallet.grmm.inference.VariableElimination;
import cc.mallet.grmm.util.CSIntInt2ObjectMultiMap;
import cc.mallet.grmm.util.Models;
import cc.mallet.util.Randoms;
import cc.mallet.util.*;
/**
* Class for undirected graphical models.
*
* Created: Mon Sep 15 15:18:30 2003
*
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version $Id: FactorGraph.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class FactorGraph implements Factor {
final private List factors = new ArrayList ();
/**
   * Set of clique potentials for this graph.
* Ordinarily will map Cliques to DiscretePotentials.
*/
final private THashMap clique2ptl = new THashMap ();
private Universe universe;
private TIntIntHashMap projectionMap;
private int[] my2global;
private BidirectionalIntObjectMap factorsAlphabet;
/**
* Duplicate indexing of factors for vertices and edges. These
   * arrays are indexed by the corresponding Variable's index (see {@link Variable#getIndex()})
*/
transient private List[] vertexPots;
transient private CSIntInt2ObjectMultiMap pairwiseFactors;
transient private List[] factorsByVar;
int numNodes;
public FactorGraph () {
super();
numNodes = 0;
setCachesCapacity (0);
factorsAlphabet = new BidirectionalIntObjectMap ();
}
/**
* Create a model with the variables given. This is much faster
* than adding the variables one at a time.
*/
public FactorGraph (Variable[] vars) {
this();
setCachesCapacity (vars.length);
for (int i = 0; i < vars.length; i++) {
cacheVariable (vars [i]);
}
}
public FactorGraph (Factor[] factors)
{
this ();
for (int i = 0; i < factors.length; i++) {
addFactor (factors[i]);
}
}
public FactorGraph (Collection factors)
{
this ();
for (Iterator it = factors.iterator (); it.hasNext ();) {
addFactor ((Factor) it.next ());
}
}
/**
   * Create a model with the given capacity (i.e., capacity in terms of number of variable nodes).
   *  The model can still grow later, but declaring the capacity in advance, if you know it,
   *  makes many operations more efficient.
*/
public FactorGraph (int capacity)
{
this ();
setCachesCapacity (capacity);
}
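  // Editorial sketch (not part of the original source): typical construction of a small
  // model, assuming two hypothetical binary variables v0 and v1.
  //
  //   Variable v0 = new Variable (2);
  //   Variable v1 = new Variable (2);
  //   FactorGraph fg = new FactorGraph (new Variable[] { v0, v1 });
  //   fg.addFactor (new TableFactor (new Variable[] { v0, v1 },
  //                                  new double[] { 0.1, 0.9, 0.4, 0.6 }));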
/**************************************************************************
* CACHING
**************************************************************************/
private void clearCaches ()
{
setCachesCapacity (numNodes);
pairwiseFactors.clear ();
projectionMap.clear ();
}
// Increases the size of all the caching arrays that need to be increased when a node is added.
  //  This can also be called before the caches have been set up.
private void setCachesCapacity (int n)
{
factorsByVar = new List [n];
for (int i = 0; i < n; i++) { factorsByVar[i] = new ArrayList (); }
vertexPots = new List [n];
my2global = new int [n];
if (projectionMap == null) {
projectionMap = new TIntIntHashMap (n);
// projectionMap.setDefaultValue (-1);
} else {
projectionMap.ensureCapacity (n);
}
    // no need to recreate pairwiseFactors if it already exists, since it's a hash-based map.
if (pairwiseFactors == null) pairwiseFactors = new CSIntInt2ObjectMultiMap ();
}
private void removeFactor (Factor factor)
{
factors.remove (factor);
clique2ptl.remove (factor.varSet ());
regenerateCaches ();
}
private void removeFactorsOfVariable (final Variable var)
{
for (Iterator it = factors.iterator (); it.hasNext ();) {
Factor ptl = (Factor) it.next ();
if (ptl.varSet ().contains (var)) {
it.remove ();
}
}
clique2ptl.retainEntries(new TObjectObjectProcedure () {
public boolean execute (Object clique, Object ptl) {
return !((VarSet) clique).contains (var);
}
});
}
private void removeFromVariableCaches (Variable victim)
{
Set survivors = new THashSet (variablesSet ());
survivors.remove (victim);
int vi = 0;
TIntIntHashMap dict = new TIntIntHashMap (survivors.size ());
    // dict.setDefaultValue (-1); no longer supported, but getIndex() is written to avoid the need for it.
my2global = new int[survivors.size ()];
for (Iterator it = survivors.iterator (); it.hasNext();) {
Variable var = (Variable) it.next ();
int gvi = var.getIndex ();
dict.put (gvi, vi);
my2global [vi] = gvi;
}
projectionMap = dict;
numNodes--; // do this at end b/c it affects getVertexSet()
}
private void recacheFactors ()
{
numNodes = 0;
for (Iterator it = factors.iterator (); it.hasNext ();) {
Factor ptl = (Factor) it.next ();
VarSet vs = ptl.varSet ();
addVarsIfNecessary (vs);
cacheFactor (vs, ptl);
}
}
private void regenerateCaches ()
{
clearCaches ();
recacheFactors ();
}
private void updateFactorCaches ()
{
assert numNodes == numVariables ();
if (vertexPots == null) {
setCachesCapacity (numNodes);
} else if (numNodes > vertexPots.length) {
List[] oldVertexPots = vertexPots;
CSIntInt2ObjectMultiMap oldEdgePots = pairwiseFactors;
List[] oldFactorsByVar = factorsByVar;
int[] oldM2G = my2global;
setCachesCapacity (2*numNodes);
assert (oldEdgePots != null);
System.arraycopy (oldVertexPots, 0, vertexPots, 0, oldVertexPots.length);
System.arraycopy (oldM2G, 0, my2global, 0, oldM2G.length);
for (int i = 0; i < oldFactorsByVar.length; i++) {
factorsByVar[i].addAll (oldFactorsByVar[i]);
}
}
}
private void cacheVariable (Variable var)
{
numNodes++;
updateFactorCaches ();
int gvi = var.getIndex ();
int myvi = numNodes - 1;
projectionMap.put (gvi, myvi);
my2global[myvi] = gvi;
}
private void cacheFactor (VarSet varSet, Factor factor)
{
switch (varSet.size()) {
case 1:
int vidx = getIndex (varSet.get(0));
cacheVariableFactor (vidx, factor);
factorsByVar[vidx].add (factor);
break;
case 2:
int idx1 = getIndex (varSet.get(0));
int idx2 = getIndex (varSet.get(1));
cachePairwiseFactor (idx1, idx2, factor);
break;
default:
for (Iterator it = varSet.iterator (); it.hasNext ();) {
Variable var = (Variable) it.next ();
int idx = getIndex (var);
factorsByVar[idx].add (factor);
}
break;
}
}
private void cacheVariableFactor (int vidx, Factor factor)
{
if (vertexPots[vidx] == null) {
vertexPots[vidx] = new ArrayList (2);
}
vertexPots[vidx].add (factor);
}
private void cachePairwiseFactor (int idx1, int idx2, Factor ptl)
{
pairwiseFactors.add (idx1, idx2, ptl);
pairwiseFactors.add (idx2, idx1, ptl);
factorsByVar[idx1].add (ptl);
factorsByVar[idx2].add (ptl);
}
/**************************************************************************
* ACCESSORS
**************************************************************************/
/** Returns the number of variable nodes in the graph. */
public int numVariables () { return numNodes; }
public Set variablesSet () {
return new AbstractSet () {
public Iterator iterator () { return variablesIterator (); }
public int size () { return numNodes; }
};
}
public Iterator variablesIterator ()
{
return new Iterator () {
private int i = 0;
public boolean hasNext() { return i < numNodes; }
public Object next() { return get(i++); }
public void remove() { throw new UnsupportedOperationException (); }
};
}
/**
* Returns all variables that are adjacent to a given variable in
* this graph---that is, the set of all variables that share a
* factor with this one.
*/
//xxx inefficient. perhaps cache this.
public VarSet getAdjacentVertices (Variable var)
{
HashVarSet c = new HashVarSet ();
List adjFactors = allFactorsContaining (var);
for (Iterator it = adjFactors.iterator (); it.hasNext ();) {
Factor factor = (Factor) it.next ();
c.addAll (factor.varSet ());
}
return c;
}
/**
   * Returns an unmodifiable collection containing the factors in this model.
*/
public Collection factors () {
return Collections.unmodifiableCollection (factors);
}
/**
* Returns an iterator of all the factors in the graph.
*/
public Iterator factorsIterator ()
{
return factors ().iterator();
}
/**
* Returns an iterator over all assignments to all variables of this
* graphical model.
* @see Assignment
*/
public AssignmentIterator assignmentIterator ()
{
return new DenseAssignmentIterator (varSet ());
}
/**
* Returns an iterator of all the VarSets in the graph
* over which factors are defined.
*/
public Iterator varSetIterator ()
{
return clique2ptl.keySet().iterator();
}
/**
* Returns a unique numeric index for a variable in this model.
   * Every FactorGraph <tt>mdl</tt> maintains a mapping between its
   * variables and the integers 0...size(mdl)-1, which is suitable
* for caching the variables in an array.
* <p>
* <tt>getIndex</tt> and <tt>get</tt> are inverses. That is, if
* <tt>idx == getIndex (var)</tt>, then <tt>get(idx)</tt> will
* return <tt>var</tt>.
* @param var A variable contained in this graphical model
* @return The numeric index of var
* @see #get(int)
*/
public int getIndex (Variable var)
{
int idx = var.getIndex();
if (projectionMap.containsKey(idx)) {
return projectionMap.get(idx);
}
else {
return -1;
}
}
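  // Editorial sketch: getIndex and get are inverses for any variable already in the model.
  // Hypothetical usage, where fg is this FactorGraph and var was previously added:
  //
  //   int idx = fg.getIndex (var);   // in 0 ... numVariables()-1, or -1 if var is absent
  //   assert idx == -1 || fg.get (idx) == var;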
public int getIndex (Factor factor)
{
return factorsAlphabet.lookupIndex (factor, false);
}
/**
* Returns a variable from this model with a given index.
   * Every FactorGraph <tt>mdl</tt> maintains a mapping between its
   * variables and the integers 0...size(mdl)-1, which is suitable
* for caching the variables in an array.
* <P>
* <tt>getIndex</tt> and <tt>get</tt> are inverses. That is, if
* <tt>idx == getIndex (var)</tt>, then <tt>get(idx)</tt> will
* return <tt>var</tt>.
* @see #getIndex(Variable)
*/
public Variable get (int index)
{
int globalIdx = my2global[index];
return universe.get (globalIdx);
}
public Factor getFactor (int i)
{
return (Factor) factorsAlphabet.lookupObject (i);
}
/** Returns the degree of a given variable in this factor graph,
* that is, the number of factors in which the variable is
* an argument.
*/
public int getDegree (Variable var)
{
return allFactorsContaining (var).size ();
}
/**
* Searches this model for a variable with a given name.
* @param name Name to find.
* @return A variable <tt>var</tt> such that <tt>var.getLabel().equals (name)</tt>
*/
public Variable findVariable (String name)
{
Iterator it = variablesIterator ();
while (it.hasNext()) {
Variable var = (Variable) it.next();
if (var.getLabel().equals(name)) {
return var;
}
}
return null;
}
/**
* Returns the factor in this graph, if any, whose domain is a given clique.
* @return The factor defined over this clique. Returns null if
   * no such factor exists. Will not return a
* potential defined over subsets or supersets of this clique.
* @see #addFactor(Factor)
* @see #factorOf(Variable,Variable)
* @see #factorOf(Variable)
*/
public Factor factorOf (VarSet varSet)
{
switch (varSet.size ()) {
case 1: return factorOf (varSet.get (0));
case 2: return factorOf (varSet.get (0), varSet.get (1));
default: return factorOf ((Collection) varSet);
}
}
/**
* Returns the factor defined over a given pair of variables.
* <P>
* This method is equivalent to calling {@link #factorOf}
* with a VarSet that contains only <tt>v1</tt> and <tt>v2</tt>.
* <P>
* @param var1 One variable of the pair.
* @param var2 The other variable of the pair.
* @return The factor defined over the pair <tt>(v1, v2)</tt>
* Returns null if no such potential exists.
*/
public Factor factorOf (Variable var1, Variable var2)
{
List ptls = allEdgeFactors (var1, var2);
Factor ptl = firstIfSingleton (ptls, var1+" "+var2);
if (ptl != null) {
assert ptl.varSet().size() == 2;
assert ptl.containsVar (var1);
assert ptl.containsVar (var2);
}
return ptl;
}
private List allEdgeFactors (Variable var1, Variable var2)
{
return pairwiseFactors.get (getIndex (var1), getIndex (var2));
}
/** Returns a collection of all factors that involve only the given variables.
* That is, all factors whose domain is a subset of the given collection.
*/
public Collection allFactorsContaining (Collection vars)
{
THashSet factors = new THashSet ();
for (Iterator it = factorsIterator (); it.hasNext ();) {
Factor ptl = (Factor) it.next ();
if (vars.containsAll (ptl.varSet ()))
factors.add (ptl);
}
return factors;
}
public List allFactorsContaining (Variable var)
{
return factorsByVar [getIndex (var)];
}
/** Returns a list of all factors in the graph whose domain is exactly the specified var. */
public List allFactorsOf (Variable var)
{
int idx = getIndex (var);
if (idx == -1) {
return new ArrayList ();
} else {
return vertexPots [idx];
}
}
/** Returns a list of all factors in the graph whose domain is exactly the specified Collection of Variables. */
public List allFactorsOf (Collection c)
{
// Rather than iterating over all factors, just iterate over ones that we know contain c.get(0)
// (could possibly make more efficient by picking the var with smallest degree).
Variable v0 = (Variable) c.iterator ().next ();
List factors = factorsByVar[getIndex (v0)];
List ret = new ArrayList ();
for (Iterator it = factors.iterator(); it.hasNext();) {
Factor f = (Factor) it.next ();
VarSet varSet = f.varSet ();
if (varSet.size() == c.size ()) {
if (c.containsAll (varSet) && varSet.containsAll (c)) {
ret.add (f);
}
}
}
return ret;
}
/**************************************************************************
* MUTATORS
**************************************************************************/
/**
* Removes a variable from this model, along with all of its factors.
*/
public void remove (Variable var)
{
removeFromVariableCaches (var);
removeFactorsOfVariable (var);
regenerateCaches ();
}
/**
   * Removes a Collection of variables from this model, along with all of their factors.
* This is equivalent to calling remove(Variable) on each element of the collection, but
* because of the caching performed elsewhere in this class, this method is vastly
* more efficient.
*/
public void remove (Collection vars)
{
for (Iterator it = vars.iterator (); it.hasNext();) {
Variable var = (Variable) it.next ();
removeFactorsOfVariable (var);
}
numNodes -= vars.size ();
regenerateCaches ();
}
/**
* Returns whether two variables are adjacent in the model's graph.
* @param v1 A variable in this model
* @param v2 Another variable in this model
* @return Whether there is an edge connecting them
*/
public boolean isAdjacent (Variable v1, Variable v2)
{
List factors = allFactorsContaining (v1);
Iterator it = factors.iterator ();
while (it.hasNext()) {
Factor ptl = (Factor) it.next ();
if (ptl.varSet ().contains (v2)) {
return true;
}
}
return false;
}
/**
* Returns whether this variable is part of the model.
* @param v1 Any Variable object
   * @return true if this variable is contained in the model.
*/
public boolean containsVar (Variable v1)
{
return variablesSet ().contains (v1);
}
public void addFactor (Variable var1, Variable var2, double[] probs)
{
Variable[] vars = new Variable[] { var1, var2 };
TableFactor pot = new TableFactor (vars, probs);
addFactor (pot);
}
/**
* Adds a factor to the model.
* <P>
* If a factor has already been added for the variables in the
* given clique, the effects of this method are (currently)
* undefined.
* <p>
* All convenience methods for adding factors eventually call through
* to this one, so this is the method for subclasses to override if they
* wish to perform additional actions when a factor is added to the graph.
*
* @param factor A factor over the variables in clique.
*/
public void addFactor (Factor factor)
{
beforeFactorAdd (factor);
VarSet varSet = factor.varSet ();
addVarsIfNecessary (varSet);
factors.add (factor);
factorsAlphabet.lookupIndex (factor);
addToListMap (clique2ptl, varSet, factor);
// cache the factor
cacheFactor (varSet, factor);
afterFactorAdd (factor);
}
/** Performs checking of a factor before it is added to the model.
* This method should throw an unchecked exception if there is a problem.
* This implementation does nothing, but it may be overridden by subclasses.
* @param factor Factor that is about to be added
*/
protected void beforeFactorAdd (Factor factor) {}
/** Performs operations on a factor after it has been added to the model,
* such as caching.
* This implementation does nothing, but it may be overridden by subclasses.
* @param factor Factor that has just been added
*/
protected void afterFactorAdd (Factor factor) {}
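  // Editorial sketch: how a hypothetical subclass might use the hooks above, e.g. to accept
  // only pairwise factors. Illustrative only; not an actual MALLET class.
  //
  //   public class PairwiseOnlyFactorGraph extends FactorGraph {
  //     protected void beforeFactorAdd (Factor factor) {
  //       if (factor.varSet ().size () > 2)
  //         throw new IllegalArgumentException ("Only pairwise factors allowed: " + factor);
  //     }
  //   }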
private void addToListMap (Map map, Object key, Object value)
{
List lst = (List) map.get (key);
if (lst == null) {
lst = new ArrayList ();
map.put (key, lst);
}
lst.add (value);
}
private void addVarsIfNecessary (VarSet varSet)
{
for (int i = 0; i < varSet.size(); i++) {
Variable var = varSet.get (i);
if (universe == null) { universe = var.getUniverse (); }
if (getIndex (var) < 0) {
cacheVariable (var);
}
}
}
/**
   * Removes all potentials from this model.
*/
public void clear ()
{
factorsAlphabet = new BidirectionalIntObjectMap ();
factors.clear ();
clique2ptl.clear ();
clearCaches ();
numNodes = 0;
}
/**
* Returns the unnormalized probability for an assignment to the
   * model. That is, the value returned is
* <pre>
\prod_C \phi_C (assn)
</pre>
* where C ranges over all cliques for which factors have been defined.
*
* @param assn An assignment for all variables in this model.
* @return The unnormalized probability
*/
public double factorProduct (Assignment assn)
{
Iterator ptlIter = factorsIterator ();
double ptlProd = 1;
while (ptlIter.hasNext())
{
ptlProd *= ((Factor)ptlIter.next()).value (assn);
}
return ptlProd;
}
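  // Editorial note: since logValue (assn) below sums the log values of the same factors,
  // factorProduct (assn) should agree with Math.exp (logValue (assn)) and with value (assn),
  // up to floating-point error.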
/**
* Returns the factor for a given node. That is, this method returns the
* factor whose domain is exactly this node.
* <P>
* This method is equivalent to calling {@link #factorOf}
* with a clique object that contains only <tt>v</tt>.
* <P>
* @param var which the factor is over.
* @throws RuntimeException If the model contains more than one factor over the given variable. Use allFactorsOf in this case.
* @return The factor defined over the edge <tt>v</tt>
* (such as by {@link #addFactor(Factor)}). Returns null if
* no such factor exists.
*/
public Factor factorOf (Variable var)
{
List lst = allFactorsOf (var);
return firstIfSingleton (lst, var.toString ());
}
private Factor firstIfSingleton (List lst, String desc)
{
if (lst == null) return null;
int sz = lst.size ();
if (sz > 1) {
throw new RuntimeException ("Multiple factors over "+desc+":\n"+ CollectionUtils.dumpToString (lst, " "));
} else if (sz == 0) {
return null;
} else {
return (Factor) lst.get (0);
}
}
/**
* Searches the graphical model for a factor over the given
* collection of variables.
* @return The factor defined over the given collection. Returns null if
* no such factor exists. Will not return
* factors defined over subsets or supersets of the given collection.
* @throws RuntimeException If multiple factors exist over the given collection.
* @see #allFactorsOf(java.util.Collection)
* @see #addFactor(Factor)
* @see #factorOf(VarSet)
*/
public Factor factorOf (Collection c)
{
List factors = allFactorsOf (c);
return firstIfSingleton (factors, c.toString ());
}
/**
* Returns a copy of this model. The variable objects are shared
* between this model and its copy, but the factor objects are deep-copied.
*/
public Factor duplicate ()
{
FactorGraph dup = new FactorGraph (numVariables ());
try {
for (Iterator it = variablesSet ().iterator(); it.hasNext();) {
Variable var = (Variable) it.next();
dup.cacheVariable (var);
}
for (Iterator it = factorsIterator (); it.hasNext();) {
Factor pot = (Factor) it.next();
dup.addFactor (pot.duplicate ());
}
} catch (Exception e) {
e.printStackTrace ();
}
return dup;
}
/**
* Dumps all the variables and factors of the model to
* <tt>System.out</tt> in human-readable text.
*/
public void dump ()
{
dump (new PrintWriter (new OutputStreamWriter (System.out), true));
}
public void dump (PrintWriter out)
{
out.println(this);
out.println("Factors = "+clique2ptl);
for (Iterator it = factors.iterator(); it.hasNext();) {
Factor pot = (Factor) it.next();
out.println(pot.dumpToString ());
}
}
public String dumpToString ()
{
StringWriter out = new StringWriter ();
dump (new PrintWriter (out));
return out.toString ();
}
/**************************************************************************
* FACTOR IMPLEMENTATION
**************************************************************************/
public double value (Assignment assn)
{
return Math.exp (logValue (assn));
}
public double value (AssignmentIterator it)
{
return value (it.assignment ());
}
// uses brute-force algorithm
public Factor normalize ()
{
VariableElimination inf = new VariableElimination ();
double Z = inf.computeNormalizationFactor (this);
addFactor (new ConstantFactor (1.0/Z));
return this;
}
public Factor marginalize (Variable[] vars)
{
throw new UnsupportedOperationException ("not yet implemented");
}
public Factor marginalize (Collection vars)
{
if (numVariables () < 5) {
return asTable ().marginalize (vars);
} else {
throw new UnsupportedOperationException ("not yet implemented");
}
}
public Factor marginalize (Variable var)
{
VariableElimination inf = new VariableElimination ();
return inf.unnormalizedMarginal (this, var);
}
public Factor marginalizeOut (Variable var)
{
throw new UnsupportedOperationException ("not yet implemented");
}
public Factor marginalizeOut (VarSet varset)
{
throw new UnsupportedOperationException ("not yet implemented");
}
public Factor extractMax (Collection vars)
{
if (numVariables () < 5) {
return asTable ().extractMax (vars);
} else {
throw new UnsupportedOperationException ("not yet implemented");
}
}
public Factor extractMax (Variable var)
{
if (numVariables () < 5) {
return asTable ().extractMax (var);
} else {
throw new UnsupportedOperationException ("not yet implemented");
}
}
public Factor extractMax (Variable[] vars)
{
if (numVariables () < 5) {
return asTable ().extractMax (vars);
} else {
throw new UnsupportedOperationException ("not yet implemented");
}
}
// xxx should return an Assignment
public int argmax ()
{
throw new UnsupportedOperationException ("not yet implemented");
}
  // Assumes that the structure of the factor graph is continuous --> discrete; continuous
  // parameter variables are sampled first, then the discrete slice given those values.
public Assignment sample (Randoms r)
{
Variable[] contVars = Factors.continuousVarsOf (this);
if ((contVars.length == 0) || (contVars.length == numVariables ())) {
return sampleInternal (r);
} else {
Assignment paramAssn = sampleContinuousVars (contVars, r);
FactorGraph discreteSliceFg = (FactorGraph) this.slice (paramAssn);
Assignment discreteAssn = discreteSliceFg.sampleInternal (r);
return Assignment.union (paramAssn, discreteAssn);
}
}
/** Samples the continuous variables in this factor graph. */
public Assignment sampleContinuousVars (Randoms r)
{
Variable[] contVars = Factors.continuousVarsOf (this);
return sampleContinuousVars (contVars, r);
}
private Assignment sampleContinuousVars (Variable[] contVars, Randoms r)
{
Collection contFactors = allFactorsContaining (Arrays.asList (contVars));
FactorGraph contFg = new FactorGraph (contVars);
for (Iterator it = contFactors.iterator (); it.hasNext ();) {
Factor factor = (Factor) it.next ();
contFg.multiplyBy (factor);
}
return contFg.sampleInternal (r);
}
private Assignment sampleInternal (Randoms r)
{
ExactSampler sampler = new ExactSampler (r);
return sampler.sample (this, 1);
}
public double sum ()
{
VariableElimination inf = new VariableElimination ();
return inf.computeNormalizationFactor (this);
}
public double entropy ()
{
throw new UnsupportedOperationException ("not yet implemented");
}
public Factor multiply (Factor dist)
{
FactorGraph fg = (FactorGraph) duplicate ();
fg.addFactor (dist);
return fg;
}
public void multiplyBy (Factor pot)
{
addFactor (pot);
}
public void exponentiate (double power)
{
throw new UnsupportedOperationException ("not yet implemented");
}
public void divideBy (Factor pot)
{
if (factors.contains (pot)) {
removeFactor (pot);
} else {
throw new UnsupportedOperationException ("not yet implemented");
}
}
public VarSet varSet ()
{
return new HashVarSet (variablesSet());
}
public boolean almostEquals (Factor p)
{
throw new UnsupportedOperationException ();
}
public boolean almostEquals (Factor p, double epsilon)
{
throw new UnsupportedOperationException ("not yet implemented");
}
public boolean isNaN ()
{
for (int fi = 0; fi < factors.size (); fi++) {
if (getFactor (fi).isNaN ())
return true;
}
return false;
}
public double logValue (AssignmentIterator it)
{
return logValue (it.assignment ());
}
public double logValue (int loc)
{
throw new UnsupportedOperationException ();
}
public Variable getVariable (int i)
{
return get (i);
}
// todo: merge this in
public Factor slice (Assignment assn)
{
return slice (assn, null);
}
public Factor slice (Assignment assn, Map toSlicedMap)
{
return Models.addEvidence (this, assn, toSlicedMap);
}
/**************************************************************************
* CACHING FACILITY FOR THE USE OF INFERENCE ALGORITHMS
**************************************************************************/
transient THashMap inferenceCaches = new THashMap();
/**
* Caches some information about this graph that is specific to
* a given type of inferencer (e.g., a junction tree).
* @param inferencer Class of inferencer that can use this
* information
* @param info The information to cache.
* @see #getInferenceCache
*/
public void setInferenceCache (Class inferencer, Object info)
{
inferenceCaches.put (inferencer, info);
}
/**
* Caches some information about this graph that is specific to
* a given type of inferencer (e.g., a junction tree).
* @param inferencer Class of inferencer which wants the information
* @return Whatever object was previously cached for inferencer
* using setInferenceCache. Returns null if no object has been cached.
* @see #setInferenceCache
*/
public Object getInferenceCache (Class inferencer)
{
return inferenceCaches.get (inferencer);
}
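  // Editorial sketch: intended usage of the cache above, assuming a hypothetical inferencer
  // that wants to reuse a precomputed structure across calls on the same graph.
  //
  //   Object cached = fg.getInferenceCache (MyInferencer.class);
  //   if (cached == null) {
  //     cached = buildExpensiveStructure (fg);   // hypothetical helper
  //     fg.setInferenceCache (MyInferencer.class, cached);
  //   }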
public void logify ()
{
List oldFactors = new ArrayList (factors);
clear ();
for (Iterator it = oldFactors.iterator (); it.hasNext ();) {
AbstractTableFactor factor = (AbstractTableFactor) it.next ();
addFactor (new LogTableFactor (factor));
}
}
public double logValue (Assignment assn)
{
Iterator ptlIter = factorsIterator ();
double ptlProd = 0;
while (ptlIter.hasNext())
{
ptlProd += ((Factor)ptlIter.next()).logValue (assn);
}
return ptlProd;
}
public AbstractTableFactor asTable ()
{
return TableFactor.multiplyAll (factors).asTable ();
}
public String toString ()
{
StringBuffer buf = new StringBuffer ();
buf.append ("FactorGraph: Variables ");
for (int i = 0; i < numNodes; i++) {
Variable var = get (i);
buf.append (var);
buf.append (",");
}
buf.append ("\n");
buf.append ("Factors: ");
for (Iterator it = factors.iterator (); it.hasNext ();) {
Factor factor = (Factor) it.next ();
buf.append ("[");
buf.append (factor.varSet ());
buf.append ("],");
}
buf.append ("\n");
return buf.toString ();
}
public void printAsDot (PrintWriter out)
{
out.println ("graph model {");
outputEdgesAsDot (out);
out.println ("}");
}
private static final String[] colors = { "red", "green", "blue", "yellow" };
public void printAsDot (PrintWriter out, Assignment assn)
{
out.println ("graph model {");
outputEdgesAsDot (out);
for (Iterator it = variablesIterator (); it.hasNext();) {
Variable var = (Variable) it.next ();
int value = assn.get(var);
String color = colors[value];
out.println (var.getLabel ()+" [style=filled fillcolor="+color+"];");
}
out.println ("}");
}
private void outputEdgesAsDot (PrintWriter out)
{
int ptlIdx = 0;
for (Iterator it = factors ().iterator(); it.hasNext();) {
Factor ptl = (Factor) it.next ();
VarSet vars = ptl.varSet ();
for (Iterator varIt = vars.iterator (); varIt.hasNext ();) {
Variable var = (Variable) varIt.next ();
out.print ("PTL"+ptlIdx+" -- "+var.getLabel ());
out.println (";\n");
}
ptlIdx++;
}
}
// Serialization garbage
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException
{
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
in.readInt (); // int version = ...
regenerateCaches ();
}
}
| 31,541 | 26.309091 | 130 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/BinaryUnaryFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import cc.mallet.util.Randoms;
/**
 * A factor over a single binary variable <tt>var</tt> and two continuous parameter
 * variables <tt>theta1</tt> and <tt>theta2</tt>, one per outcome of <tt>var</tt>.
 * For a fixed assignment to the parameters, the factor reduces to a two-entry table
 * over <tt>var</tt> whose values are the assigned parameter values.
* $Id: BinaryUnaryFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class BinaryUnaryFactor extends AbstractFactor implements ParameterizedFactor {
private Variable theta1;
private Variable theta2;
private Variable var; // The binary variable
public BinaryUnaryFactor (Variable var, Variable theta1, Variable theta2)
{
super (BinaryUnaryFactor.combineVariables (theta1, theta2, var));
this.theta1 = theta1;
this.theta2 = theta2;
this.var = var;
if (var.getNumOutcomes () != 2) {
throw new IllegalArgumentException ("Discrete variable "+var+" in BoltzmannUnary must be binary.");
}
if (!theta1.isContinuous ()) {
throw new IllegalArgumentException ("Parameter "+theta1+" in BinaryUnary must be continuous.");
}
if (!theta2.isContinuous ()) {
throw new IllegalArgumentException ("Parameter "+theta2+" in BinaryUnary must be continuous.");
}
}
private static VarSet combineVariables (Variable theta1, Variable theta2, Variable var)
{
VarSet ret = new HashVarSet ();
ret.add (theta1);
ret.add (theta2);
ret.add (var);
return ret;
}
protected Factor extractMaxInternal (VarSet varSet)
{
throw new UnsupportedOperationException ();
}
protected double lookupValueInternal (int i)
{
throw new UnsupportedOperationException ();
}
protected Factor marginalizeInternal (VarSet varsToKeep)
{
throw new UnsupportedOperationException ();
}
/* Inefficient, but this will seldom be called. */
public double value (AssignmentIterator it)
{
Assignment assn = it.assignment();
Factor tbl = sliceForAlpha (assn);
return tbl.value (assn);
}
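  // For a fixed assignment to (theta1, theta2), the factor reduces to a two-entry table
  // over var whose values are the assigned parameter values.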
private Factor sliceForAlpha (Assignment assn)
{
double th1 = assn.getDouble (theta1);
double th2 = assn.getDouble (theta2);
double[] vals = new double[] { th1, th2 };
return new TableFactor (var, vals);
}
public Factor normalize ()
{
throw new UnsupportedOperationException ();
}
public Assignment sample (Randoms r)
{
throw new UnsupportedOperationException ();
}
public double logValue (AssignmentIterator it)
{
return Math.log (value (it));
}
public Factor slice (Assignment assn)
{
Factor alphSlice = sliceForAlpha (assn);
// recursively slice, in case assn includes some of the xs
return alphSlice.slice (assn);
}
public String dumpToString ()
{
StringBuffer buf = new StringBuffer ();
buf.append ("[BinaryUnary : var=");
buf.append (var);
buf.append (" theta1=");
buf.append (theta1);
buf.append (" theta2=");
buf.append (theta2);
buf.append (" ]");
return buf.toString ();
}
public double sumGradLog (Factor q, Variable param, Assignment paramAssn)
{
Factor q_xs = q.marginalize (var);
Assignment assn;
if (param == theta1) {
assn = new Assignment (var, 0);
} else if (param == theta2) {
assn = new Assignment (var, 1);
} else {
throw new IllegalArgumentException ("Attempt to take gradient of "+this+" wrt "+param+
"but factor does not depend on that variable.");
}
return q_xs.value (assn);
}
public Factor duplicate ()
{
return new BinaryUnaryFactor (var, theta1, theta2);
}
public boolean almostEquals (Factor p, double epsilon)
{
return equals (p);
}
public boolean isNaN ()
{
return false;
}
public boolean equals (Object o)
{
if (this == o) return true;
if (o == null || getClass () != o.getClass ()) return false;
final BinaryUnaryFactor that = (BinaryUnaryFactor) o;
if (theta1 != null ? !theta1.equals (that.theta1) : that.theta1 != null) return false;
if (theta2 != null ? !theta2.equals (that.theta2) : that.theta2 != null) return false;
if (var != null ? !var.equals (that.var) : that.var != null) return false;
return true;
}
public int hashCode ()
{
int result;
result = (theta1 != null ? theta1.hashCode () : 0);
result = 29 * result + (theta2 != null ? theta2.hashCode () : 0);
result = 29 * result + (var != null ? var.hashCode () : 0);
return result;
}
}
| 4,876 | 27.688235 | 107 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/BetaFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import java.io.ObjectInputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import cc.mallet.util.Maths;
import cc.mallet.util.Randoms;
/**
* $Id: BetaFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class BetaFactor extends AbstractFactor {
transient private Variable var;
transient private double min;
transient private double max;
transient private double alpha;
transient private double beta;
transient private double beta12;
public BetaFactor (Variable var, double alpha, double beta)
{
this (var, alpha, beta, 0, 1);
}
public BetaFactor (Variable var, double alpha, double beta, double min, double max)
{
super (new HashVarSet (new Variable[] { var }));
if (!var.isContinuous ()) throw new IllegalArgumentException ();
if (min >= max) throw new IllegalArgumentException ();
this.var = var;
this.min = min;
this.max = max;
this.alpha = alpha;
this.beta = beta;
setBeta12 ();
}
private void setBeta12 ()
{
beta12 = 1 / Maths.beta (alpha, beta);
}
protected Factor extractMaxInternal (VarSet varSet)
{
throw new UnsupportedOperationException ();
}
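  // Value is the Beta(alpha, beta) density of the variable's value after rescaling
  // [min, max] onto (0, 1); it is zero outside that range.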
public double value (Assignment assn)
{
double pct = valueToPct (assn.getDouble (var));
if ((0 < pct) && (pct < 1)) {
return beta12 * Math.pow (pct, (alpha - 1.0)) * Math.pow ((1-pct), (beta -1.0));
} else {
return 0;
}
}
private double valueToPct (double val)
{
return (val - min) / (max - min);
}
private double pctToValue (double pct)
{
return (pct * (max - min)) + min;
}
protected double lookupValueInternal (int i)
{
throw new UnsupportedOperationException ();
}
protected Factor marginalizeInternal (VarSet varsToKeep)
{
if (varsToKeep.contains (var)) {
return duplicate ();
} else {
return new ConstantFactor (1.0);
}
}
public Factor normalize ()
{
return this;
}
public Assignment sample (Randoms r)
{
double pct = r.nextBeta (alpha, beta);
double val = pctToValue (pct);
return new Assignment (var, val);
}
public boolean almostEquals (Factor p, double epsilon)
{
return equals (p);
}
public Factor duplicate ()
{
return new BetaFactor (var, alpha, beta, min, max);
}
public boolean isNaN ()
{
return Double.isNaN(alpha) || Double.isNaN(beta) || Double.isNaN (min) || Double.isNaN (max)
|| alpha <= 0 || beta <= 0;
}
public String dumpToString ()
{
return toString ();
}
public void multiplyBy (Factor f)
{
if (f instanceof ConstantFactor) {
double val = f.value (new Assignment());
      // BetaFactor must be normalized right now...
if (Maths.almostEquals (val, 1.0)) {
return; // ok, it's an identity factor
}
}
throw new UnsupportedOperationException ("Can't multiply BetaFactor by "+f);
}
public void divideBy (Factor f)
{
if (f instanceof ConstantFactor) {
double val = f.value (new Assignment());
// NormalFactor must be normalized right now...
if (Maths.almostEquals (val, 1.0)) {
return; // ok, it's an identity factor
}
}
throw new UnsupportedOperationException ("Can't divide BetaFactor by "+f);
}
public String toString ()
{
return "[BetaFactor("+alpha +", "+beta +") "+var+" scale=("+min+" ... " +max+") ]";
}
public Factor slice (Assignment assn)
{
if (assn.containsVar (var)) {
return new ConstantFactor (value (assn));
} else return duplicate ();
}
// serialization nonsense
private static final long serialVersionUID = 1L;
private static final int SERIAL_VERSION = 1;
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
in.readInt (); // serial version
var = (Variable) in.readObject ();
alpha = in.readDouble ();
beta = in.readDouble ();
min = in.readDouble ();
max = in.readDouble ();
}
private void writeObject (ObjectOutputStream out) throws IOException, ClassNotFoundException
{
out.defaultWriteObject ();
out.writeInt (SERIAL_VERSION);
out.writeObject (var);
out.writeDouble (alpha);
out.writeDouble (beta);
out.writeDouble (min);
out.writeDouble (max);
setBeta12 ();
}
}
| 4,803 | 23.891192 | 96 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/DenseAssignmentIterator.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
class DenseAssignmentIterator extends AbstractAssignmentIterator implements AssignmentIterator {
private int current = 0;
DenseAssignmentIterator (VarSet verts)
{
super (verts);
}
DenseAssignmentIterator (VarSet verts, int index)
{
super (verts);
current = index;
if (current >= max) {
throw new IllegalArgumentException ("No assigment # "+index +" for "+this+". Max is "+max);
}
}
public void advance()
{
current++;
}
//xxx wise to make public?
public int indexOfCurrentAssn () { return current; }
public boolean hasNext() {
return current < max;
}
}
| 1,071 | 23.930233 | 98 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/SkeletonFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
/**
* A subclass of Factor in which all operations throw an UnsupportedOperationException.
* This is useful for creating special-purpose factor classes that support only a few operations.
* $Id: SkeletonFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class SkeletonFactor extends AbstractFactor {
protected Factor extractMaxInternal (VarSet varSet)
{
throw new UnsupportedOperationException ();
}
protected double lookupValueInternal (int i)
{
throw new UnsupportedOperationException ();
}
protected Factor marginalizeInternal (VarSet varsToKeep)
{
throw new UnsupportedOperationException ();
}
public boolean almostEquals (Factor p, double epsilon)
{
throw new UnsupportedOperationException ();
}
public boolean isNaN () { return false; }
public Factor normalize ()
{
throw new UnsupportedOperationException ();
}
public Assignment sample (cc.mallet.util.Randoms r)
{
throw new UnsupportedOperationException ();
}
public Factor duplicate ()
{
throw new UnsupportedOperationException ();
}
public String dumpToString ()
{
return toString ();
}
public Factor slice (Assignment assn)
{
throw new UnsupportedOperationException ();
}
}
| 1,702 | 25.609375 | 97 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/PottsTableFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import cc.mallet.grmm.util.Matrices;
import cc.mallet.types.Matrix;
import cc.mallet.types.SparseMatrixn;
import cc.mallet.util.Randoms;
/**
* A factor over a continuous variable alpha and discrete variables <tt>x</tt>
 * such that <tt>phi(x|alpha)</tt> is Potts. That is, for fixed alpha, <tt>phi(x)</tt> = 1
 * if all x are equal, and <tt>exp(-alpha)</tt> otherwise.
* $Id: PottsTableFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class PottsTableFactor extends AbstractFactor implements ParameterizedFactor {
private Variable alpha;
private VarSet xs;
public PottsTableFactor (VarSet xs, Variable alpha)
{
super (combineVariables (alpha, xs));
this.alpha = alpha;
this.xs = xs;
if (!alpha.isContinuous ()) throw new IllegalArgumentException ("alpha must be continuous");
}
public PottsTableFactor (Variable x1, Variable x2, Variable alpha)
{
super (new HashVarSet (new Variable[] { x1, x2, alpha }));
this.alpha = alpha;
this.xs = new HashVarSet (new Variable[] { x1, x2 });
if (!alpha.isContinuous ()) throw new IllegalArgumentException ("alpha must be continuous");
}
private static VarSet combineVariables (Variable alpha, VarSet xs)
{
VarSet ret = new HashVarSet (xs);
ret.add (alpha);
return ret;
}
protected Factor extractMaxInternal (VarSet varSet)
{
throw new UnsupportedOperationException ();
}
protected double lookupValueInternal (int i)
{
throw new UnsupportedOperationException ();
}
protected Factor marginalizeInternal (VarSet varsToKeep)
{
throw new UnsupportedOperationException ();
}
/* Inefficient, but this will seldom be called. */
public double value (AssignmentIterator it)
{
Assignment assn = it.assignment();
Factor tbl = sliceForAlpha (assn);
return tbl.value (assn);
}
private Factor sliceForAlpha (Assignment assn)
{
double alph = assn.getDouble (alpha);
int[] sizes = sizesFromVarSet (xs);
Matrix diag = Matrices.diag (sizes, alph);
Matrix matrix = Matrices.constant (sizes, -alph);
matrix.plusEquals (diag);
return LogTableFactor.makeFromLogMatrix (xs.toVariableArray (), (SparseMatrixn) matrix);
}
private int[] sizesFromVarSet (VarSet xs)
{
int[] szs = new int [xs.size ()];
for (int i = 0; i < xs.size (); i++) {
szs[i] = xs.get (i).getNumOutcomes ();
}
return szs;
}
public Factor normalize ()
{
throw new UnsupportedOperationException ();
}
public Assignment sample (Randoms r)
{
throw new UnsupportedOperationException ();
}
public double logValue (AssignmentIterator it)
{
return Math.log (value (it));
}
public Factor slice (Assignment assn)
{
Factor alphSlice = sliceForAlpha (assn);
// recursively slice, in case assn includes some of the xs
return alphSlice.slice (assn);
}
public String dumpToString ()
{
StringBuffer buf = new StringBuffer ();
buf.append ("[Potts: alpha:");
buf.append (alpha);
buf.append (" xs:");
buf.append (xs);
buf.append ("]");
return buf.toString ();
}
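  // Editorial note: log phi(x|alpha) is -alpha when the xs disagree and 0 when they all
  // agree, so its derivative with respect to alpha is -1 on disagreeing assignments and 0
  // otherwise; the expected gradient under q is therefore minus the q-probability of
  // disagreement, as computed below.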
public double sumGradLog (Factor q, Variable param, Assignment theta)
{
if (param != alpha) throw new IllegalArgumentException ();
Factor q_xs = q.marginalize (xs);
double qDiff = 0.0;
for (AssignmentIterator it = xs.assignmentIterator (); it.hasNext(); it.advance()) {
Assignment assn = it.assignment ();
if (!isAllEqual (assn)) {
qDiff += -q_xs.value (it);
}
}
return qDiff;
}
public double secondDerivative (Factor q, Variable param, Assignment theta)
{
double e_x = sumGradLog (q, param, theta);
Factor q_xs = q.marginalize (xs);
double e_x2 = 0.0;
for (AssignmentIterator it = xs.assignmentIterator (); it.hasNext(); it.advance()) {
Assignment assn = it.assignment ();
if (!isAllEqual (assn)) {
e_x2 += q_xs.value (it);
}
}
return e_x2 - (e_x * e_x);
}
private boolean isAllEqual (Assignment assn)
{
Object val1 = assn.getObject (xs.get (0));
for (int i = 1; i < xs.size (); i++) {
Object val2 = assn.getObject (xs.get (i));
if (!val1.equals (val2)) return false;
}
return true;
}
public Factor duplicate ()
{
return new PottsTableFactor (xs, alpha);
}
public boolean isNaN ()
{
return false;
}
public boolean almostEquals (Factor p, double epsilon)
{
return equals (p);
}
public boolean equals (Object o)
{
if (this == o) return true;
if (o == null || getClass () != o.getClass ()) return false;
final PottsTableFactor that = (PottsTableFactor) o;
if (alpha != null ? !alpha.equals (that.alpha) : that.alpha != null) return false;
if (xs != null ? !xs.equals (that.xs) : that.xs != null) return false;
return true;
}
public int hashCode ()
{
int result;
result = (alpha != null ? alpha.hashCode () : 0);
result = 29 * result + (xs != null ? xs.hashCode () : 0);
return result;
}
}
| 5,501 | 26.237624 | 96 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/SparseAssignmentIterator.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
/**
* Created: Dec 15, 2005
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: SparseAssignmentIterator.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
class SparseAssignmentIterator extends AbstractAssignmentIterator {
private int[] indices;
private int sparseIdx = 0;
SparseAssignmentIterator (VarSet verts, int[] indices)
{
super (verts);
this.indices = indices;
}
public void advance() { sparseIdx++; }
public boolean hasNext () { return sparseIdx < indices.length; }
//xxx wise to make public?
public int indexOfCurrentAssn () { return indices [sparseIdx]; }
}
| 1,092 | 32.121212 | 87 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/LogTableFactor.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import java.util.Collection;
import java.util.Iterator;
import cc.mallet.grmm.util.Flops;
import cc.mallet.types.Matrix;
import cc.mallet.types.Matrixn;
import cc.mallet.types.SparseMatrixn;
import cc.mallet.util.Maths;
/**
 * A discrete table factor that stores and manipulates its values in log space.
 * <p>
 * Created: Jan 4, 2006
*
* @author <A HREF="mailto:[email protected]>[email protected]</A>
* @version $Id: LogTableFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class LogTableFactor extends AbstractTableFactor {
public LogTableFactor (AbstractTableFactor in)
{
super (in);
probs = (Matrix) in.getLogValueMatrix ().cloneMatrix ();
}
public LogTableFactor (Variable var)
{
super (var);
}
public LogTableFactor (Variable[] allVars)
{
super (allVars);
}
public LogTableFactor (Collection allVars)
{
super (allVars);
}
// Create from
// Used by makeFromLogFactorValues
private LogTableFactor (Variable[] vars, double[] logValues)
{
super (vars, logValues);
}
private LogTableFactor (Variable[] allVars, Matrix probsIn)
{
super (allVars, probsIn);
}
//**************************************************************************/
public static LogTableFactor makeFromValues (Variable[] vars, double[] vals)
{
double[] vals2 = new double [vals.length];
for (int i = 0; i < vals.length; i++) {
vals2[i] = Math.log (vals[i]);
}
return makeFromLogValues (vars, vals2);
}
public static LogTableFactor makeFromLogValues (Variable[] vars, double[] vals)
{
return new LogTableFactor (vars, vals);
}
//**************************************************************************/
void setAsIdentity ()
{
setAll (0.0);
}
public Factor duplicate ()
{
return new LogTableFactor (this);
}
protected AbstractTableFactor createBlankSubset (Variable[] vars)
{
return new LogTableFactor (vars);
}
public Factor normalize ()
{
double sum = logspaceOneNorm ();
if (sum < -500)
System.err.println ("Attempt to normalize all-0 factor "+this.dumpToString ());
for (int i = 0; i < probs.numLocations (); i++) {
double val = probs.valueAtLocation (i);
probs.setValueAtLocation (i, val - sum);
}
return this;
}
private double logspaceOneNorm ()
{
double sum = Double.NEGATIVE_INFINITY; // That's 0 in log space
for (int i = 0; i < probs.numLocations (); i++) {
sum = Maths.sumLogProb (sum, probs.valueAtLocation (i));
}
Flops.sumLogProb (probs.numLocations ());
return sum;
}
public double sum ()
{
Flops.exp (); // logspaceOneNorm counts rest
return Math.exp (logspaceOneNorm ());
}
public double logsum ()
{
return logspaceOneNorm ();
}
/**
* Does the conceptual equivalent of this *= pot.
* Assumes that pot's variables are a subset of
* this potential's.
*/
protected void multiplyByInternal (DiscreteFactor ptl)
{
int[] projection = largeIdxToSmall (ptl);
int numLocs = probs.numLocations ();
for (int singleLoc = 0; singleLoc < numLocs; singleLoc++) {
int smallIdx = projection[singleLoc];
double prev = this.probs.valueAtLocation (singleLoc);
double newVal = ptl.logValue (smallIdx);
double product = prev + newVal;
this.probs.setValueAtLocation (singleLoc, product);
}
Flops.increment (numLocs); // handle the pluses
}
// Does destructive divison on this, assuming this has all
// the variables in pot.
protected void divideByInternal (DiscreteFactor ptl)
{
int[] projection = largeIdxToSmall (ptl);
int numLocs = probs.numLocations ();
for (int singleLoc = 0; singleLoc < numLocs; singleLoc++) {
int smallIdx = projection[singleLoc];
double prev = this.probs.valueAtLocation (singleLoc);
double newVal = ptl.logValue (smallIdx);
double product = prev - newVal;
/* by convention, let -Inf + Inf (corresponds to 0/0) be -Inf */
if (Double.isInfinite (newVal)) {
product = Double.NEGATIVE_INFINITY;
}
this.probs.setValueAtLocation (singleLoc, product);
}
Flops.increment (numLocs); // handle the pluses
}
/**
* Does the conceptual equivalent of this += pot.
* Assumes that pot's variables are a subset of
* this potential's.
*/
protected void plusEqualsInternal (DiscreteFactor ptl)
{
int[] projection = largeIdxToSmall (ptl);
int numLocs = probs.numLocations ();
for (int singleLoc = 0; singleLoc < numLocs; singleLoc++) {
int smallIdx = projection[singleLoc];
double prev = this.probs.valueAtLocation (singleLoc);
double newVal = ptl.logValue (smallIdx);
double product = Maths.sumLogProb (prev, newVal);
this.probs.setValueAtLocation (singleLoc, product);
}
Flops.sumLogProb (numLocs);
}
public double value (Assignment assn)
{
Flops.exp ();
if (getNumVars () == 0) return 1.0;
return Math.exp (rawValue (assn));
}
public double value (AssignmentIterator it)
{
Flops.exp ();
return Math.exp (rawValue (it.indexOfCurrentAssn ()));
}
public double value (int idx)
{
Flops.exp ();
return Math.exp (rawValue (idx));
}
public double logValue (AssignmentIterator it)
{
return rawValue (it.indexOfCurrentAssn ());
}
public double logValue (int idx)
{
return rawValue (idx);
}
public double logValue (Assignment assn)
{
return rawValue (assn);
}
protected Factor marginalizeInternal (AbstractTableFactor result)
{
result.setAll (Double.NEGATIVE_INFINITY);
int[] projection = largeIdxToSmall (result);
/* Add each element of the single array of the large potential
to the correct element in the small potential. */
int numLocs = probs.numLocations ();
for (int largeLoc = 0; largeLoc < numLocs; largeLoc++) {
/* Convert a single-index from this distribution to
one for the smaller distribution */
int smallIdx = projection[largeLoc];
/* Whew! Now, add it in. */
double oldValue = this.probs.valueAtLocation (largeLoc);
double currentValue = result.probs.singleValue (smallIdx);
result.probs.setValueAtLocation (smallIdx,
Maths.sumLogProb (oldValue, currentValue));
}
Flops.sumLogProb (numLocs);
return result;
}
protected double rawValue (Assignment assn)
{
int numVars = getNumVars ();
int[] indices = new int[numVars];
for (int i = 0; i < numVars; i++) {
Variable var = getVariable (i);
indices[i] = assn.get (var);
}
return rawValue (indices);
}
private double rawValue (int[] indices)
{
    // handle non-occurring indices specially, since the default value is -Inf in log space.
int singleIdx = probs.singleIndex (indices);
return rawValue (singleIdx);
}
protected double rawValue (int singleIdx)
{
int loc = probs.location (singleIdx);
if (loc < 0) {
return Double.NEGATIVE_INFINITY;
} else {
return probs.valueAtLocation (loc);
}
}
public void exponentiate (double power)
{
Flops.increment (probs.numLocations ());
probs.timesEquals (power);
}
/*
protected AbstractTableFactor ensureOperandCompatible (AbstractTableFactor ptl)
{
if (!(ptl instanceof LogTableFactor)) {
return new LogTableFactor(ptl);
} else {
return ptl;
}
}
*/
public void setLogValue (Assignment assn, double logValue)
{
setRawValue (assn, logValue);
}
public void setLogValue (AssignmentIterator assnIt, double logValue)
{
setRawValue (assnIt, logValue);
}
public void setValue (AssignmentIterator assnIt, double value)
{
Flops.log ();
setRawValue (assnIt, Math.log (value));
}
public void setLogValues (double[] vals)
{
for (int i = 0; i < vals.length; i++) {
setRawValue (i, vals[i]);
}
}
public void setValues (double[] vals)
{
Flops.log (vals.length);
for (int i = 0; i < vals.length; i++) {
setRawValue (i, Math.log (vals[i]));
}
}
// v is *not* in log space
public void timesEquals (double v)
{
timesEqualsLog (Math.log (v));
}
private void timesEqualsLog (double logV)
{
Flops.increment (probs.numLocations ());
Matrix other = (Matrix) probs.cloneMatrix ();
other.setAll (logV);
probs.plusEquals (other);
}
protected void plusEqualsAtLocation (int loc, double v)
{
Flops.log (); Flops.sumLogProb (1);
double oldVal = logValue (loc);
setRawValue (loc, Maths.sumLogProb (oldVal, Math.log (v)));
}
public static LogTableFactor makeFromValues (Variable var, double[] vals2)
{
return makeFromValues (new Variable[]{var}, vals2);
}
public static LogTableFactor makeFromMatrix (Variable[] vars, SparseMatrixn values)
{
SparseMatrixn logValues = (SparseMatrixn) values.cloneMatrix ();
for (int i = 0; i < logValues.numLocations (); i++) {
logValues.setValueAtLocation (i, Math.log (logValues.valueAtLocation (i)));
}
Flops.log (logValues.numLocations ());
return new LogTableFactor (vars, logValues);
}
public static LogTableFactor makeFromLogMatrix (Variable[] vars, Matrix values)
{
Matrix logValues = (Matrix) values.cloneMatrix ();
return new LogTableFactor (vars, logValues);
}
public static LogTableFactor makeFromLogValues (Variable v, double[] vals)
{
return makeFromLogValues (new Variable[]{v}, vals);
}
public Matrix getValueMatrix ()
{
Matrix logProbs = (Matrix) probs.cloneMatrix ();
for (int loc = 0; loc < probs.numLocations (); loc++) {
logProbs.setValueAtLocation (loc, Math.exp (logProbs.valueAtLocation (loc)));
}
Flops.exp (probs.numLocations ());
return logProbs;
}
public Matrix getLogValueMatrix ()
{
return probs;
}
public double valueAtLocation (int idx)
{
Flops.exp ();
return Math.exp (probs.valueAtLocation (idx));
}
protected Factor slice_onevar (Variable var, Assignment observed)
{
Assignment assn = (Assignment) observed.duplicate ();
double[] vals = new double [var.getNumOutcomes ()];
for (int i = 0; i < var.getNumOutcomes (); i++) {
assn.setValue (var, i);
vals[i] = logValue (assn);
}
return LogTableFactor.makeFromLogValues (var, vals);
}
protected Factor slice_twovar (Variable v1, Variable v2, Assignment observed)
{
Assignment assn = (Assignment) observed.duplicate ();
int N1 = v1.getNumOutcomes ();
int N2 = v2.getNumOutcomes ();
int[] szs = new int[]{N1, N2};
double[] vals = new double [N1 * N2];
for (int i = 0; i < N1; i++) {
assn.setValue (v1, i);
for (int j = 0; j < N2; j++) {
assn.setValue (v2, j);
int idx = Matrixn.singleIndex (szs, new int[]{i, j}); // Inefficient, but much less error prone
vals[idx] = logValue (assn);
}
}
return LogTableFactor.makeFromLogValues (new Variable[]{v1, v2}, vals);
}
protected Factor slice_general (Variable[] vars, Assignment observed)
{
VarSet toKeep = new HashVarSet (vars);
toKeep.removeAll (observed.varSet ());
double[] vals = new double [toKeep.weight ()];
AssignmentIterator it = toKeep.assignmentIterator ();
while (it.hasNext ()) {
Assignment union = Assignment.union (observed, it.assignment ());
vals[it.indexOfCurrentAssn ()] = logValue (union);
it.advance ();
}
return LogTableFactor.makeFromLogValues (toKeep.toVariableArray (), vals);
}
public static LogTableFactor multiplyAll (Collection phis)
{
/* Get all the variables */
VarSet vs = new HashVarSet ();
for (Iterator it = phis.iterator (); it.hasNext ();) {
Factor phi = (Factor) it.next ();
vs.addAll (phi.varSet ());
}
/* define a new potential over the neighbors of NODE */
LogTableFactor newCPF = new LogTableFactor (vs);
for (Iterator it = phis.iterator (); it.hasNext ();) {
Factor phi = (Factor) it.next ();
newCPF.multiplyBy (phi);
}
return newCPF;
}
public AbstractTableFactor recenter ()
{
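// Shift every entry (in log space) by the negative of the largest entry,
// so that the maximum value of the factor becomes 1 (0 in log space).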
// return (AbstractTableFactor) normalize ();
int loc = argmax ();
double lval = probs.valueAtLocation(loc); // should be refactored
timesEqualsLog (-lval);
return this;
}
}
| 12,725 | 26.019108 | 103 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/Factor.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types; // Generated package name
import java.io.Serializable;
import java.util.Collection;
import cc.mallet.util.Randoms;
/**
* Interface for multivariate discrete probability distributions.
* All distributions are assumed to be over
* 0...k. If you want a distribution over some
* other discrete set, use the @see getAlphabet
* and @see setAlphabet members.
*
* (I thought about having a single Potential interface,
* for both continuous and discrete, but then all the method
* parameters were java.lang.Object, and the lack of type
* safety was both inefficient and disturbing.)
*
* Created: Mon Sep 15 14:04:58 2003
*
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version $Id: Factor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public interface Factor extends Cloneable, Serializable {
/**
* Returns the value of the local function for a given assignment.
* All variables in the potential must be included, but it's
* okay if the assignment uses variables not in the potential.
*/
public double value (Assignment assn);
/**
* Returns the probability of an assignment to these variables.
* The assignment used is the current assignment from the given
* AssignmentIterator.
* <p>
* This can be used to do things like
* <pre>
Factor phi = createMyPtl ();
for (AssignmentIterator it = phi.assignmentIterator (); it.hasNext(); it.advance()) {
double val = phi.value (it);
// do something with val
}
</pre>
* <p>
* This is equivalent to creating an assignment object explicitly
* using <tt>(Assignment) it.next()</tt>, but can be much faster.
*/
public double value (AssignmentIterator it);
/**
* Multiplies this potential by a constant such that it sums to 1.
* Destructive; returns this factor.
*/
public Factor normalize ();
/**
* Returns the marginal of this distribution over the given variables.
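* <p>
* A minimal usage sketch (the factor and variable names are illustrative):
* <pre>
Factor joint = createMyPtl ();   // a factor over variables x and y
Factor phiOverX = joint.marginalize (new Variable[] { x });
</pre>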
*/
public Factor marginalize (Variable vars[]);
/**
* Returns the marginal of this distribution over the given variables.
*/
public Factor marginalize (Collection vars);
/**
* Returns the marginal of this distribution over one variable.
*/
public Factor marginalize (Variable var);
/**
* Returns the marginal distribution attained by summing out
* the given variable.
*/
public Factor marginalizeOut (Variable var);
/**
* Returns the marginal distribution attained by summing out
* the given set of variables.
*/
public Factor marginalizeOut (VarSet varset);
/**
* Returns a potential phi over the given variables
* obtained by taking
* phi (x) = max_[all v that contain x] this.prob (x)
*/
public Factor extractMax (Collection vars);
/**
* Returns a potential phi over the given variables
* obtained by taking
* phi (x) = max_[all v that contain x] this.prob (x)
*/
public Factor extractMax (Variable var);
/**
* Returns a potential phi over the given variables
* obtained by taking
* phi (x) = max_[all v that contain x] this.prob (x)
*/
public Factor extractMax (Variable[] vars);
/**
* Returns the assignment that maximizes this potential.
*/
// todo: should return an Assignment
int argmax ();
/**
* Return an assignment sampled from this factor, interpreting
* it as an unnormalized probability distribution.
*/
Assignment sample (Randoms r);
/**
* Returns the sum of this potential over all cases.
*/
public double sum ();
/**
* Returns the expected log factor value, i.e.,
* <tt>sum_x factor.value(x) * Math.log (factor.value (x))</tt>
* where the summation is taken over all possible assignments.
*/
public double entropy ();
/**
* Returns the elementwise product of this factor with
* another.
*/
public Factor multiply (Factor dist);
/**
* Does this *= pot.
* <P>
* If both potentials are currently in log space, then does
* addition instead.
* @throws UnsupportedOperationException If one potential is in
* log space and the other isn't.
*/
public void multiplyBy (Factor pot);
public void exponentiate (double power);
/**
* Computes this /= pot
* <P>
* If both potentials are currently in log space, then does
* subtraction instead.
* @throws UnsupportedOperationException If one potential is in
* log space and the other isn't.
*/
public void divideBy (Factor pot);
/**
* Returns whether the potential is over the given variable.
*/
public boolean containsVar (Variable var);
/** Returns set of variables in this potential. */
public VarSet varSet ();
/** Returns an iterator over all Assignments to this potential. */
public AssignmentIterator assignmentIterator ();
/** Returns whether this is almost equal to another potential. */
public boolean almostEquals (Factor p);
public boolean almostEquals (Factor p, double epsilon);
public Factor duplicate ();
public boolean isNaN ();
double logValue (AssignmentIterator it);
double logValue (Assignment assn);
double logValue (int loc);
Variable getVariable (int i);
String dumpToString ();
Factor slice (Assignment assn);
AbstractTableFactor asTable ();
}
| 5,667 | 26.64878 | 84 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/UnmodifiableVarSet.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import java.util.*;
import java.io.Serializable;
import java.io.ObjectInputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
/**
* Created: Dec 15, 2005
*
* @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: UnmodifiableVarSet.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class UnmodifiableVarSet implements VarSet, Serializable {
transient private VarSet subVarSet;
public UnmodifiableVarSet (VarSet subVarSet)
{
this.subVarSet = subVarSet;
}
public Variable get (int idx)
{
return subVarSet.get (idx);
}
public Set vertices ()
{
return Collections.unmodifiableSet (subVarSet);
}
public Variable[] toVariableArray ()
{
return subVarSet.toVariableArray ();
}
public int weight ()
{
return subVarSet.weight ();
}
public AssignmentIterator assignmentIterator ()
{
return subVarSet.assignmentIterator ();
}
public VarSet intersection (VarSet c)
{
return subVarSet.intersection (c);
}
public int size ()
{
return subVarSet.size ();
}
public boolean isEmpty ()
{
return subVarSet.isEmpty ();
}
public boolean contains (Object o)
{
return subVarSet.contains (o);
}
public Iterator iterator ()
{
return subVarSet.iterator ();
}
public Object[] toArray ()
{
return subVarSet.toArray ();
}
public Object[] toArray (Object[] objects)
{
return subVarSet.toArray (objects);
}
public boolean add (Object o)
{
throw new UnsupportedOperationException ("Attempt to modify unmodifiable clique: "+this);
}
public boolean remove (Object o)
{
throw new UnsupportedOperationException ("Attempt to modify unmodifiable clique: "+this);
}
public boolean containsAll (Collection collection)
{
return subVarSet.containsAll (collection);
}
public boolean addAll (Collection collection)
{
throw new UnsupportedOperationException ("Attempt to modify unmodifiable clique: "+this);
}
public boolean retainAll (Collection collection)
{
throw new UnsupportedOperationException ("Attempt to modify unmodifiable clique: "+this);
}
public boolean removeAll (Collection collection)
{
throw new UnsupportedOperationException ("Attempt to modify unmodifiable clique: "+this);
}
public void clear ()
{
throw new UnsupportedOperationException ("Attempt to modify unmodifiable clique: "+this);
}
public boolean equals (Object o)
{
return subVarSet.equals (o);
}
public int hashCode ()
{
return subVarSet.hashCode ();
}
public String toString ()
{
return subVarSet.toString ();
}
private static final long serialVersionUID = 1L;
private static final int SERIAL_VERSION = 1;
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
in.readInt (); // version
subVarSet = (VarSet) in.readObject ();
}
private void writeObject (ObjectOutputStream out) throws IOException
{
out.defaultWriteObject ();
out.writeInt (SERIAL_VERSION);
out.writeObject (subVarSet);
}
}
| 3,601 | 21.654088 | 93 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/BitVarSet.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.*;
import cc.mallet.grmm.inference.Utils;
/**
* A clique that uses very little time and memory based on the flyweight
* pattern. The owner supplies a Universe of variables and a BitSet,
* and the clique contains the variables in the Universe, masked by the BitSet.
*
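* A small construction sketch (variable names are illustrative; both variables are
* assumed to belong to the same Universe):
* <pre>
Variable v1 = new Variable (2);
Variable v2 = new Variable (2);
VarSet clique = new BitVarSet (v1.getUniverse (), Arrays.asList (new Variable[] { v1, v2 }));
</pre>
*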
* @author Charles Sutton
* @version $Id: BitVarSet.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class BitVarSet extends AbstractSet implements VarSet {
private Universe universe;
private BitSet bitset;
/**
* Creates a BitSet clique given an alphabet of Variables,
* and a bitset that says which variables in the alphabet
* to include in the clique. Neither is copied, but
* neither is modified, either.
*
* @param universe
* @param included Bit mask that indicates which variables to include
*/
public BitVarSet (Universe universe, BitSet included)
{
this.universe = universe;
this.bitset = included;
// System.out.println("vertexMap: " + vertexMap);
// System.out.println("bitSet: " + bitset);
}
public BitVarSet (Universe universe, Collection included)
{
this.universe = universe;
this.bitset = new BitSet (universe.size ());
java.util.Iterator it = included.iterator();
while (it.hasNext()) {
bitset.set (universe.getIndex ((Variable) it.next()));
}
}
public BitVarSet (VarSet vsOld)
{
this (vsOld.get(0).getUniverse (), vsOld);
}
public boolean add (Object o)
{
int idx = universe.getIndex ((Variable) o);
if (idx == -1)
throw new UnsupportedOperationException();
bitset.set (idx);
return true;
}
public Variable get(int idx)
{
int i,mapIdx = 0;
for ( i = 0, mapIdx = bitset.nextSetBit (0) ; i<idx; i++)
{
mapIdx = bitset.nextSetBit (mapIdx+1);
if (mapIdx == -1)
throw new IndexOutOfBoundsException("Index "+idx+" in BitSetClique");
}
//System.out.println("["+idx+"]("+mapIdx+")"+vertexMap.lookupObject (mapIdx));
return universe.get (mapIdx);
}
public Variable[] toVariableArray()
{
return (Variable[]) toArray (new Variable[0]);
}
// FIXME cache not updated on changes to the clique
private int cachedWeight = -1;
public int weight()
{
if (cachedWeight == -1) {
int weight = 1;
Iterator it = new Iterator();
while (it.hasNext()) {
Variable var = (Variable) it.next();
weight *= var.getNumOutcomes();
}
cachedWeight = weight;
}
return cachedWeight;
}
public AssignmentIterator assignmentIterator()
{
return new DenseAssignmentIterator (this);
}
public int size()
{
return bitset.cardinality();
}
public boolean isEmpty()
{
return bitset.isEmpty();
}
public boolean contains(Object o)
{
return bitset.get(universe.getIndex ((Variable) o));
}
private class Iterator implements java.util.Iterator {
int nextIdx;
public Iterator () { nextIdx = bitset.nextSetBit (0); }
public boolean hasNext()
{
return (nextIdx >= 0);
}
public Object next()
{
int thisIdx = nextIdx;
nextIdx = bitset.nextSetBit (thisIdx + 1);
return universe.get (thisIdx);
}
public void remove()
{
throw new UnsupportedOperationException("Removal from BitSetClique not permitted");
}
}
public java.util.Iterator iterator()
{
return new Iterator();
}
public int hashCode ()
{
return bitset.hashCode ();
}
public boolean containsAll(Collection c)
{
if (c instanceof BitVarSet) {
return containsAll ((BitVarSet) c);
} else {
return super.containsAll (c);
}
}
/**
* Efficient version of containsAll() for BitSetCliques.
*/
public boolean containsAll (BitVarSet bsc)
{
assert universe == bsc.universe;
for(int i=bsc.bitset.nextSetBit(0); i>=0; i=bsc.bitset.nextSetBit(i+1)) {
if (!bitset.get (i)) {
return false;
}
}
return true;
}
public VarSet intersection (VarSet c) {
if (c instanceof BitVarSet) {
// Efficient implementation
BitVarSet bsc = (BitVarSet) c;
BitSet newBitSet = (BitSet) bitset.clone();
newBitSet.and (bsc.bitset);
return new BitVarSet (universe, newBitSet);
} else {
return Utils.defaultIntersection (this, c);
}
}
/**
* Returns the number of variables in the intersection between this clique and other.
* Equivalent to <tt>intersection(bsc).size()</tt>, but more efficient.
* @param bsc Other clique to intersect with
*/
public int intersectionSize (BitVarSet bsc)
{
assert universe == bsc.universe;
int size = 0;
for(int i=bsc.bitset.nextSetBit(0); i>=0; i=bsc.bitset.nextSetBit(i+1)) {
if (bitset.get (i)) {
size++;
}
}
return size;
}
public void clear()
{
bitset.clear();
}
public boolean hasLabel()
{
return true;
}
public String getLabel()
{
return toString ();
}
public String toString ()
{
String foo = "(C";
Iterator it = new Iterator ();
while (it.hasNext()) {
Variable var = (Variable) it.next();
foo = foo + " " + var;
}
foo = foo + ")";
return foo;
}
public void setLabel(String s)
{
throw new UnsupportedOperationException();
}
// Serialization garbage
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException
{
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
int version = in.readInt ();
}
}
| 6,262 | 21.288256 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/AbstractAssignmentIterator.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import cc.mallet.types.Matrixn;
/**
* Created: Dec 15, 2005
*
* @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: AbstractAssignmentIterator.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
abstract class AbstractAssignmentIterator implements AssignmentIterator {
protected VarSet vertsList;
protected int max = 1;
private int[] sizes;
private Assignment assn = null;
protected AbstractAssignmentIterator (VarSet verts)
{
vertsList = verts;
initSizes ();
}
private void initSizes ()
{
sizes = new int [vertsList.size()];
for (int i = 0; i < sizes.length; i++) {
Variable var = vertsList.get (i);
if (var.isContinuous ()) {
throw new UnsupportedOperationException ("Attempt to create AssignmentIterator over "+vertsList+", but "+var+" is continuous.");
}
sizes[i] = var.getNumOutcomes ();
}
max = vertsList.weight ();
}
protected Assignment constructAssignment ()
{
int current = indexOfCurrentAssn ();
if (sizes == null) initSizes (); // Lazily build sizes array
int[] outcomes = new int [sizes.length];
Matrixn.singleToIndices (current, outcomes, sizes);
Variable[] vars = (Variable[]) vertsList.toArray (new Variable [0]);
return new Assignment (vars, outcomes);
}
public void remove() {
throw new UnsupportedOperationException
("Attempt to remave assignment from Clique.");
}
public Assignment assignment ()
{
if (assn == null) {
assn = constructAssignment ();
return assn;
} else {
int current = indexOfCurrentAssn ();
int[] outcomes = new int [sizes.length];
Matrixn.singleToIndices (current, outcomes, sizes);
assn.setRow (0, outcomes);
return assn;
}
}
public Object next()
{
Assignment assn = assignment ();
advance ();
return assn;
}
}
| 2,344 | 28.3125 | 136 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/BoltzmannUnaryFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import cc.mallet.util.Randoms;
/**
* A factor over a continuous variable theta and a single binary variable <tt>var</tt>,
* such that <tt>phi(x|theta)</tt> is a Boltzmann unary potential. That is, for fixed theta,
* <tt>phi(x)</tt> = 1 if x = 0, and <tt>exp(-theta)</tt> if x = 1.
* $Id: BoltzmannUnaryFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class BoltzmannUnaryFactor extends AbstractFactor implements ParameterizedFactor {
private Variable theta;
private Variable var; // The binary variable
public BoltzmannUnaryFactor (Variable var, Variable alpha)
{
super (BoltzmannUnaryFactor.combineVariables (alpha, var));
this.theta = alpha;
this.var = var;
if (var.getNumOutcomes () != 2) {
throw new IllegalArgumentException ("Discrete variable "+var+" in BoltzmannUnary must be binary.");
}
if (!alpha.isContinuous ()) {
throw new IllegalArgumentException ("Parameter "+alpha+" in BoltzmannUnary must be continuous.");
}
}
private static VarSet combineVariables (Variable alpha, Variable var)
{
VarSet ret = new HashVarSet ();
ret.add (alpha);
ret.add (var);
return ret;
}
protected Factor extractMaxInternal (VarSet varSet)
{
throw new UnsupportedOperationException ();
}
protected double lookupValueInternal (int i)
{
throw new UnsupportedOperationException ();
}
protected Factor marginalizeInternal (VarSet varsToKeep)
{
throw new UnsupportedOperationException ();
}
/* Inefficient, but this will seldom be called. */
public double value (AssignmentIterator it)
{
Assignment assn = it.assignment();
Factor tbl = sliceForAlpha (assn);
return tbl.value (assn);
}
private Factor sliceForAlpha (Assignment assn)
{
double alph = assn.getDouble (theta);
double[] vals = new double[] { 0.0, -alph };
return LogTableFactor.makeFromLogValues (var, vals);
}
public Factor normalize ()
{
throw new UnsupportedOperationException ();
}
public Assignment sample (Randoms r)
{
throw new UnsupportedOperationException ();
}
public double logValue (AssignmentIterator it)
{
return Math.log (value (it));
}
public Factor slice (Assignment assn)
{
Factor alphSlice = sliceForAlpha (assn);
// recursively slice, in case assn includes some of the xs
return alphSlice.slice (assn);
}
public String dumpToString ()
{
StringBuffer buf = new StringBuffer ();
buf.append ("Potts Alpha=");
buf.append (theta);
buf.append (var);
return buf.toString ();
}
public double sumGradLog (Factor q, Variable param, Assignment paramAssn)
{
if (param != theta) throw new IllegalArgumentException ();
Factor q_xs = q.marginalize (var);
Assignment assn = new Assignment (var, 1);
return - q_xs.value (assn);
}
/*
public double secondDerivative (Factor q, Variable param, Assignment paramAssn)
{
if (param != theta) throw new IllegalArgumentException ();
Factor q_xs = q.marginalize (var);
Assignment assn = new Assignment (var, 1);
double p = - q_xs.value (assn);
return p * (p - 1);
}
*/
public Factor duplicate ()
{
return new BoltzmannUnaryFactor (var, theta);
}
public boolean almostEquals (Factor p, double epsilon)
{
return equals (p);
}
public boolean isNaN ()
{
return false;
}
public boolean equals (Object o)
{
if (this == o) return true;
if (o == null || getClass () != o.getClass ()) return false;
final BoltzmannUnaryFactor that = (BoltzmannUnaryFactor) o;
if (theta != null ? !theta.equals (that.theta) : that.theta != null) return false;
if (var != null ? !var.equals (that.var) : that.var != null) return false;
return true;
}
public int hashCode ()
{
int result;
result = (theta != null ? theta.hashCode () : 0);
result = 29 * result + (var != null ? var.hashCode () : 0);
return result;
}
}
| 4,407 | 26.55 | 107 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/NormalFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import cc.mallet.util.Maths;
import cc.mallet.util.Randoms;
import no.uib.cipr.matrix.*;
/**
* Multivariate Gaussian factor. Currently, almost all of this class
* is a stub, except for the sample method.
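* <p>
* A sampling sketch (hypothetical names; mean is a Vector and variance a Matrix from
* the MTJ classes imported below, and r is a cc.mallet.util.Randoms):
* <pre>
NormalFactor factor = new NormalFactor (vars, mean, variance);
Assignment sample = factor.sample (r);
</pre>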
* $Id: NormalFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class NormalFactor extends AbstractFactor {
private Vector mean;
private Matrix variance;
public NormalFactor (VarSet vars, Vector mean, Matrix variance)
{
super (vars);
if (!isPosDef (variance)) throw new IllegalArgumentException ("Matrix "+variance+" not positive definite.");
this.mean = mean;
this.variance = variance;
}
private boolean isPosDef (Matrix variance)
{
try {
EVD evd = EVD.factorize (variance);
double[] vals = evd.getRealEigenvalues ();
return vals[vals.length - 1] > 0;
} catch (NotConvergedException e) {
throw new RuntimeException (e);
}
}
//
protected Factor extractMaxInternal (VarSet varSet)
{
throw new UnsupportedOperationException ();
}
public double value (Assignment assn)
{
// stub
return 1.0;
}
protected double lookupValueInternal (int i)
{
throw new UnsupportedOperationException ();
}
protected Factor marginalizeInternal (VarSet varsToKeep)
{
throw new UnsupportedOperationException ();
}
public Factor normalize ()
{
return this;
}
public Assignment sample (Randoms r)
{
// generate from standard normal
double[] vals = new double [mean.size ()];
for (int k = 0; k < vals.length; k++) {
vals[k] = r.nextGaussian ();
}
// and transform: result = mean + variance * Z
// (the variance matrix itself is used as the linear transform here)
Vector Z = new DenseVector (vals, false);
DenseVector result = new DenseVector (vals.length);
variance.mult (Z, result);
result = (DenseVector) result.add (mean);
return new Assignment (vars.toVariableArray (), result.getData ());
}
public boolean almostEquals (Factor p, double epsilon)
{
return equals (p);
}
public Factor duplicate ()
{
return new NormalFactor (vars, mean, variance);
}
public boolean isNaN ()
{
return false;
}
public String dumpToString ()
{
return toString ();
}
public String toString ()
{
return "[NormalFactor "+vars+" "+mean+" ... " +variance+" ]";
}
// todo
public Factor slice (Assignment assn)
{
if (assn.varSet ().containsAll (vars)) {
// special case
return new ConstantFactor (value (assn));
} else {
throw new UnsupportedOperationException ();
}
}
public void multiplyBy (Factor f)
{
if (f instanceof ConstantFactor) {
double val = f.value (new Assignment());
// NormalFactor must be normalized right now...
if (Maths.almostEquals (val, 1.0)) {
return; // ok, it's an identity factor
}
}
throw new UnsupportedOperationException ("Can't multiply NormalFactor by "+f);
}
public void divideBy (Factor f)
{
if (f instanceof ConstantFactor) {
double val = f.value (new Assignment());
// NormalFactor must be normalized right now...
if (Maths.almostEquals (val, 1.0)) {
return; // ok, it's an identity factor
}
}
throw new UnsupportedOperationException ("Can't divide NormalFactor by "+f);
}
}
| 3,718 | 23.629139 | 112 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/UndirectedModel.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import java.util.*;
import gnu.trove.THashSet;
import org._3pq.jgrapht.UndirectedGraph;
import org._3pq.jgrapht.alg.ConnectivityInspector;
import cc.mallet.grmm.util.Graphs;
/**
* Class for pairwise undirected graphical models, also known as
* pairwise Markov random fields. This is a thin wrapper over
* FactorGraph, with only a few methods added that don't make
* sense for non-pairwise graphs.
*
* Created: Dec 21, 2005
*
* @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: UndirectedModel.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class UndirectedModel extends FactorGraph {
public UndirectedModel ()
{
}
public UndirectedModel (Variable[] vars)
{
super (vars);
}
public UndirectedModel (int capacity)
{
super (capacity);
}
private Set edges = new THashSet ();
public Set getEdgeSet () {
return Collections.unmodifiableSet (edges);
}
public void addFactor (Factor factor)
{
super.addFactor (factor);
if (factor.varSet ().size() == 2) {
edges.add (factor.varSet ());
}
}
/**
* Creates an undirected model that corresponds to a Boltzmann machine with
* the given weights and biases.
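* <p>
* For example (a hypothetical two-node machine with a single coupling weight):
* <pre>
double[][] weights = new double[][] { { 0, 0.5 }, { 0.5, 0 } };
double[] biases = new double[] { -1.0, 1.0 };
UndirectedModel mdl = UndirectedModel.createBoltzmannMachine (weights, biases);
</pre>
*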
* @param weights
* @param biases
* @return An appropriate UndirectedModel.
*/
public static UndirectedModel createBoltzmannMachine (double[][] weights, double[] biases)
{
if (weights.length != biases.length)
throw new IllegalArgumentException ("Number of weights "+weights.length
+" not equal to number of biases "+biases.length);
int numV = weights.length;
Variable vars[] = new Variable [numV];
for (int i = 0; i< numV; i++) vars[i] = new Variable (2);
UndirectedModel mdl = new UndirectedModel (vars);
for (int i = 0; i < numV; i++) {
Factor nodePtl = new TableFactor (vars[i], new double[] { 1, Math.exp (biases[i]) });
mdl.addFactor (nodePtl);
for (int j = i+1; j < numV; j++) {
if (weights[i][j] != 0) {
double[] ptl = new double[] { 1, 1, 1, Math.exp (weights[i][j]) };
mdl.addFactor (vars[i], vars[j], ptl);
}
}
}
return mdl;
}
//xxx Insanely inefficient stub
public boolean isConnected (Variable v1, Variable v2)
{
UndirectedGraph g = Graphs.mdlToGraph (this);
ConnectivityInspector ins = new ConnectivityInspector (g);
return g.containsVertex (v1) && g.containsVertex (v2) && ins.pathExists (v1, v2);
}
}
| 2,948 | 28.787879 | 92 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/DiscreteFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://mallet.cs.umass.edu/
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import cc.mallet.util.Randoms;
/**
* $Id: DiscreteFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public interface DiscreteFactor extends Factor {
int sampleLocation (Randoms r);
double value (int index);
int numLocations ();
double valueAtLocation (int loc);
int indexAtLocation (int loc);
double[] toValueArray ();
int singleIndex (int[] smallDims);
}
| 824 | 25.612903 | 76 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/HashVarSet.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import gnu.trove.THashSet;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.*;
import cc.mallet.grmm.inference.Utils;
/**
* A clique is a collection of nodes in a graph that are all
* adjacent. We implement it cheaply by delegating to a HashSet.
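* <p>
* A small construction sketch (variable names are illustrative):
* <pre>
VarSet clique = new HashVarSet (new Variable[] { v1, v2 });
int numAssignments = clique.weight ();   // product of the variables' cardinalities
</pre>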
*
* Created: Wed Sep 17 12:50:01 2003
*
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version $Id: HashVarSet.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
// xxx Perhaps this should just use an alphabet and not implement Set.
public class HashVarSet implements VarSet, Serializable {
private THashSet verts = new THashSet();
private ArrayList vertsList = new ArrayList ();
/**
* Create an empty clique.
*/
public HashVarSet ()
{
super ();
} // Clique constructor
/**
* Create a two-clique given an edge in a graph.
*
public HashVarSet (Edge e) {
super ();
add (e.getVertexA());
add (e.getVertexB());
}
*/
/**
* Create a Clique given a Collection of nodes.
*/
public HashVarSet (Collection c)
{
super();
addAll (c);
}
public HashVarSet (Variable[] vars)
{
super();
addAll (Arrays.asList (vars));
}
public Variable get (int idx)
{
return (Variable) vertsList.get (idx);
}
public String getLabel () {
return toString ();
}
/**
* Returns the intersection of two cliques.
*/
public VarSet intersection (VarSet c)
{
return Utils.defaultIntersection (this, c);
}
// Code for delegation of java.util.AbstractSet methods to verts
/* Can't delegate to THashMap, because in early versions of Trove (that we are frozen at)
* the THashMap.hashCode() isn't consistent with equals. This is a workaround, which may
* be removed when we upgrade Trove. */
public int hashCode()
{
int ret = 39;
for (Iterator it = vertsList.iterator (); it.hasNext ();) {
Object o = it.next ();
ret = 59 * ret + o.hashCode ();
}
return ret;
}
public boolean equals(Object object)
{
return verts.equals(object);
}
public boolean removeAll(Collection collection)
{
boolean ret = true;
for (Iterator it = collection.iterator (); it.hasNext ();) {
Object o = it.next ();
ret = remove (o) & ret;
}
return ret;
}
public Variable[] toVariableArray ()
{
// Cannot just do (Variable[]) vertsList.toArray() because that
// would cause a ClassCastException. I suppose that's why
// toArray is overloaded...
return (Variable[]) vertsList.toArray (new Variable[size()]);
}
// Code for delegation of java.util.AbstractCollection methods to verts
public String toString()
{
String val = "(C";
for (Iterator it = iterator(); it.hasNext();) {
val += " ";
val += it.next().toString();
}
val += ")";
return val;
}
public boolean addAll (Collection collection)
{
boolean ret = true;
for (Iterator it = collection.iterator (); it.hasNext ();) {
ret = ret & add (it.next (), false);
}
Collections.sort (vertsList);
return ret;
}
/** Returns the variables in this clique as an array. If the
* clique is not modified, then the ordering will remain consistent
* across calls.
*/
public Object[] toArray(Object[] objectArray)
{
// Using vertsList here assures that toArray() always returns the
// same ordering.
return vertsList.toArray(objectArray);
}
/** Returns the variables in this clique as an array. If the
* clique is not modified, then the ordering will remain consistent
* across calls.
*/
public Object[] toArray()
{
// Using vertsList here assures that toArray() always returns the
// same ordering.
return vertsList.toArray();
}
public boolean containsAll(Collection collection)
{
return verts.containsAll(collection);
}
public boolean retainAll(Collection collection)
{
return verts.retainAll(collection);
}
// Code for delegation of java.util.HashSet methods to verts
public Object clone()
{
return verts.clone();
}
public boolean add(Object object)
{
return add (object, true);
}
public boolean add(Object object, boolean checkSorted)
{
if (!(object instanceof Variable)) throw new IllegalArgumentException (object.toString ());
if (!verts.contains (object)) {
vertsList.add (object);
boolean ret = verts.add (object);
if (checkSorted) Collections.sort (vertsList);
return ret;
} else { return false; }
}
public boolean contains(Object object)
{
return verts.contains(object);
}
public int size()
{
return verts.size();
}
// Returns the total size of a dense discrete variable over this clique.
public int weight () {
int tot = 1;
for (int i = 0; i < vertsList.size(); i++) {
Variable var = (Variable) vertsList.get (i);
tot *= var.getNumOutcomes ();
}
return tot;
}
public Iterator iterator()
{
return vertsList.iterator();
}
public boolean remove(Object object)
{
vertsList.remove (object);
return verts.remove (object);
}
public void clear()
{
vertsList.clear ();
verts.clear();
}
public boolean isEmpty() {
return verts.isEmpty();
}
// Iterating over assignments
public AssignmentIterator assignmentIterator ()
{
return new DenseAssignmentIterator (this);
}
// Serialization garbage
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException
{
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeInt (size());
for (int vi = 0; vi < size(); vi++) {
out.writeObject (get (vi));
}
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
int version = in.readInt ();
int size = in.readInt ();
for (int vi = 0; vi < size; vi++){
Variable var = (Variable) in.readObject ();
add (var);
}
}
} // Clique
| 6,495 | 22.117438 | 95 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/Assignment.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import gnu.trove.TObjectIntHashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.io.*;
import cc.mallet.grmm.inference.Utils;
import cc.mallet.types.Matrixn;
import cc.mallet.types.SparseMatrixn;
import cc.mallet.util.Randoms;
/**
* An assignment to a bunch of variables.
* <p/>
* Note that outcomes are always integers. If you
* want them to be something else, then the Variables
* all have outcome Alphabets; for example, see
* {@link Variable#lookupOutcome}.
* <p/>
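* A small usage sketch (variable names are illustrative):
* <pre>
Assignment assn = new Assignment (new Variable[] { x, y }, new int[] { 0, 1 });
int xValue = assn.get (x);   // 0
</pre>
* <p/>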
* Created: Tue Oct 21 15:11:11 2003
*
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version $Id: Assignment.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class Assignment extends AbstractFactor implements Serializable {
/* Maps from vars => indicies */
transient TObjectIntHashMap var2idx;
/* List of Object[]. Each array represents one configuration. */
transient ArrayList values;
double scale = 1.0;
/**
* Creates an empty assignment.
*/
public Assignment ()
{
super (new HashVarSet ());
var2idx = new TObjectIntHashMap ();
values = new ArrayList();
}
public Assignment (Variable var, int outcome)
{
this ();
addRow (new Variable[] { var }, new int[] { outcome });
}
public Assignment (Variable var, double outcome)
{
this ();
addRow (new Variable[] { var }, new double[] { outcome });
}
/**
* Creates an assignment for the given variables.
*/
public Assignment (Variable[] vars, int[] outcomes)
{
var2idx = new TObjectIntHashMap (vars.length);
values = new ArrayList ();
addRow (vars, outcomes);
}
/**
* Creates an assignment for the given variables.
*/
public Assignment (Variable[] vars, double[] outcomes)
{
var2idx = new TObjectIntHashMap (vars.length);
values = new ArrayList ();
addRow (vars, outcomes);
}
/**
* Creates an assignment for the given variables.
*/
public Assignment (List vars, int[] outcomes)
{
var2idx = new TObjectIntHashMap (vars.size ());
values = new ArrayList ();
addRow ((Variable[]) vars.toArray (new Variable[0]), outcomes);
}
/**
* Creates an assignment over all Variables in a model.
* The assignment will assign outcomes[i] to the variable
* <tt>mdl.get(i)</tt>
*/
public Assignment (FactorGraph mdl, int[] outcomes)
{
var2idx = new TObjectIntHashMap (mdl.numVariables ());
values = new ArrayList ();
Variable[] vars = new Variable [mdl.numVariables ()];
for (int i = 0; i < vars.length; i++) vars[i] = mdl.get (i);
addRow (vars, outcomes);
}
/**
* Returns the union of two Assignments. That is, the value of a variable in the returned Assignment
* will be as specified in one of the given assignments.
* <p>
* If the assignments share variables, the value in the new Assignment for those variables is
* undefined.
*
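* A small usage sketch (variable names are illustrative):
* <pre>
Assignment a1 = new Assignment (x, 0);
Assignment a2 = new Assignment (y, 1);
Assignment both = Assignment.union (a1, a2);   // assigns x=0 and y=1
</pre>
*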
* @param assn1 One assignment.
* @param assn2 Another assignment.
* @return A newly-created Assignment.
*/
public static Assignment union (Assignment assn1, Assignment assn2)
{
Assignment ret = new Assignment ();
VarSet vars = new HashVarSet ();
vars.addAll (assn1.vars);
vars.addAll (assn2.vars);
Variable[] varr = vars.toVariableArray ();
if (assn1.numRows () == 0) {
return (Assignment) assn2.duplicate ();
} else if (assn2.numRows () == 0) {
return (Assignment) assn1.duplicate ();
} else if (assn1.numRows () != assn2.numRows ()) {
throw new IllegalArgumentException ("Number of rows not equal.");
}
for (int ri = 0; ri < assn2.numRows (); ri++) {
Object[] row = new Object [vars.size()];
for (int vi = 0; vi < vars.size(); vi++) {
Variable var = varr[vi];
if (!assn1.containsVar (var)) {
row[vi] = assn2.getObject (var);
} else if (!assn2.containsVar (var)) {
row[vi] = assn1.getObject (var);
} else {
Object val1 = assn1.getObject (var);
Object val2 = assn2.getObject (var);
if (!val1.equals (val2)) {
throw new IllegalArgumentException ("Assignments don't match on intersection.\n ASSN1["+var+"] = "+val1+"\n ASSN2["+var+"] = "+val2);
}
row[vi] = val1;
}
}
ret.addRow (varr, row);
}
return ret;
}
/**
* Returns a new assignment which only assigns values to those variabes in a given clique.
* @param assn A large assignment
* @param varSet Which variables to restrict the assignment to
* @return A newly-created Assignment
* @deprecated marginalize
*/
public static Assignment restriction (Assignment assn, VarSet varSet)
{
return (Assignment) assn.marginalize (varSet);
}
public Assignment getRow (int ridx)
{
Assignment assn = new Assignment ();
assn.var2idx = (TObjectIntHashMap) this.var2idx.clone ();
assn.vars = new UnmodifiableVarSet (vars);
assn.addRow ((Object[]) values.get (ridx));
return assn;
}
public void addRow (Variable[] vars, int[] values) { addRow (vars, boxArray (values)); }
public void addRow (Variable[] vars, double[] values) { addRow (vars, boxArray (values)); }
public void addRow (Variable[] vars, Object[] values)
{
checkAssignmentsMatch (vars);
addRow (values);
}
public void addRow (Object[] row)
{
if (row.length != numVariables ())
throw new IllegalArgumentException ("Wrong number of values ("+row.length+") when adding a row to assignment over "+numVariables ()+" variables: "+this);
this.values.add (row);
}
public void addRow (Assignment other)
{
checkAssignmentsMatch (other);
for (int ridx = 0; ridx < other.numRows(); ridx++) {
Object[] otherRow = (Object[]) other.values.get (ridx);
Object[] row = new Object [otherRow.length];
for (int vi = 0; vi < row.length; vi++) {
Variable var = this.getVariable (vi);
row[vi] = other.getObject (ridx, var);
}
this.addRow (row);
}
}
private void checkAssignmentsMatch (Assignment other)
{
if (numVariables () == 0) {
setVariables (other.vars.toVariableArray ());
} else {
if (numVariables () != other.numVariables ())
throw new IllegalArgumentException ("Attempt to add row with non-matching variables.\n" +
" This has vars: " + varSet () + "\n Other has vars:" + other.varSet ());
for (int vi = 0; vi < numVariables (); vi++) {
Variable var = this.getVariable (vi);
if (!other.containsVar (var)) {
throw new IllegalArgumentException ("Attempt to add row with non-matching variables");
}
}
}
}
private void checkAssignmentsMatch (Variable[] vars)
{
if (numRows () == 0) {
setVariables (vars);
} else {
checkVariables (vars);
}
}
private void checkVariables (Variable[] vars)
{
for (int i = 0; i < vars.length; i++) {
Variable v1 = vars[i];
Variable v2 = (Variable) this.vars.get (i);
if (v1 != v2)
throw new IllegalArgumentException ("Attempt to add row with incompatible variables.");
}
}
private void setVariables (Variable[] varr)
{
vars.addAll (Arrays.asList (varr));
for (int i = 0; i < varr.length; i++) {
Variable v = varr[i];
var2idx.put (v, i);
}
}
private Object[] boxArray (int[] values)
{
Object[] ret = new Object[values.length];
for (int i = 0; i < ret.length; i++) {
ret[i] = new Integer (values[i]);
}
return ret;
}
private Object[] boxArray (double[] values)
{
Object[] ret = new Object[values.length];
for (int i = 0; i < ret.length; i++) {
ret[i] = new Double (values[i]);
}
return ret;
}
public int numRows () { return values.size (); }
public int get (Variable var)
{
if (numRows() != 1) throw new IllegalArgumentException ("Attempt to call get() with no row specified: "+this);
return get (0, var);
}
public double getDouble (Variable var)
{
if (numRows() != 1) throw new IllegalArgumentException ("Attempt to call getDouble() with no row specified: "+this);
return getDouble (0, var);
}
public Object getObject (Variable var)
{
if (numRows() != 1) throw new IllegalArgumentException ("Attempt to call getObject() with no row specified: "+this);
return getObject (0, var);
}
/**
* Returns the value of var in this assignment.
*/
public int get (int ridx, Variable var)
{
int idx = colOfVar (var, false);
if (idx == -1)
throw new IndexOutOfBoundsException
("Assignment does not give a value for variable " + var);
Object[] row = (Object[]) values.get (ridx);
Integer integer = (Integer) row[idx];
return integer.intValue ();
}
/**
* Returns the value of var in this assignment.
* This will be removed when we switch to Java 1.5.
*/
public double getDouble (int ridx, Variable var)
{
int idx = colOfVar (var, false);
if (idx == -1)
throw new IndexOutOfBoundsException
("Assignment does not give a value for variable " + var);
Object[] row = (Object[]) values.get (ridx);
Double dbl = (Double) row[idx];
return dbl.doubleValue ();
}
public Object getObject (int ri, Variable var)
{
Object[] row = (Object[]) values.get (ri);
int ci = colOfVar (var, false);
if (ci < 0) throw new IllegalArgumentException ("Variable "+var+" does not exist in this assignment.");
return row[ci];
}
public Variable getVariable (int i)
{
return (Variable) vars.get (i);
}
/** Returns all variables which are assigned to. */
public Variable[] getVars () {
return (Variable[]) vars.toArray (new Variable [0]);
}
public int size ()
{
return numVariables ();
}
public static Assignment makeFromSingleIndex (VarSet clique, int idx)
{
int N = clique.size ();
Variable[] vars = clique.toVariableArray ();
int[] idxs = new int [N];
int[] szs = new int [N];
// compute sizes
for (int i = 0; i < N; i++) {
Variable var = vars[i];
szs[i] = var.getNumOutcomes ();
}
Matrixn.singleToIndices (idx, idxs, szs);
return new Assignment (vars, idxs);
}
/**
* Converts this assignment into a unique integer.
* All different assignments to the same variables are guaranteed to
* have unique integers. The range of the index will be between
* 0 (inclusive) and M (exclusive), where M is the product of all
* cardinalities of all variables in this assignment.
*
* @return An integer
*/
public int singleIndex ()
{
int nr = numRows ();
if (nr == 0) {
return -1;
} else if (nr > 1) {
throw new IllegalArgumentException ("No row specified.");
} else {
return singleIndex (0);
}
}
private void checkIsSingleRow () {if (numRows () != 1) throw new IllegalArgumentException ("No row specified.");}
public int singleIndex (int row)
{
// these could be cached
int[] szs = new int [numVariables ()];
for (int i = 0; i < numVariables (); i++) {
Variable var = (Variable) vars.get (i);
szs[i] = var.getNumOutcomes ();
}
int[] idxs = toIntArray (row);
return Matrixn.singleIndex (szs, idxs);
}
public int numVariables () {return vars.size ();}
private int[] toIntArray (int ridx)
{
int[] idxs = new int [numVariables ()];
Object[] row = (Object[]) values.get (ridx);
for (int i = 0; i < row.length; i++) {
Integer val = (Integer) row [i];
idxs[i] = val.intValue ();
}
return idxs;
}
public double[] toDoubleArray (int ridx)
{
double[] idxs = new double [numVariables ()];
Object[] row = (Object[]) values.get (ridx);
for (int i = 0; i < row.length; i++) {
Double val = (Double) row [i];
idxs[i] = val.doubleValue ();
}
return idxs;
}
public Factor duplicate ()
{
Assignment ret = new Assignment ();
ret.vars = new HashVarSet (vars);
ret.var2idx = (TObjectIntHashMap) var2idx.clone ();
ret.values = new ArrayList (values.size ());
for (int ri = 0; ri < values.size(); ri++) {
Object[] vals = (Object[]) values.get (ri);
ret.values.add (vals.clone ());
}
ret.scale = scale;
return ret;
}
public void dump ()
{
dump (new PrintWriter (new OutputStreamWriter (System.out), true));
}
public void dump (PrintWriter out)
{
out.print ("ASSIGNMENT ");
out.println (varSet ());
for (int vi = 0; vi < var2idx.size (); vi++) {
Variable var = vars.get (vi);
out.print (var);
out.print (" ");
}
out.println ();
for (int ri = 0; ri < numRows (); ri++) {
for (int vi = 0; vi < var2idx.size (); vi++) {
Variable var = vars.get (vi);
Object obj = getObject (ri, var);
out.print (obj);
out.print (" ");
}
out.println ();
}
}
public void dumpNumeric ()
{
for (int i = 0; i < var2idx.size (); i++) {
Variable var = (Variable) vars.get (i);
int outcome = get (var);
System.out.println (var + " " + outcome);
}
}
/** Returns true if this assignment specifies a value for <tt>var</tt> */
public boolean containsVar (Variable var)
{
int idx = colOfVar (var, false);
return (idx != -1);
}
public void setValue (Variable var, int value)
{
checkIsSingleRow ();
setValue (0, var, value);
}
public void setValue (int ridx, Variable var, int value)
{
int ci = colOfVar (var, true);
Object[] row = (Object[]) values.get (ridx);
row[ci] = new Integer (value);
}
public void setDouble (int ridx, Variable var, double value)
{
int ci = colOfVar (var, true);
Object[] row = (Object[]) values.get (ridx);
row[ci] = new Double (value);
}
private int colOfVar (Variable var, boolean doAdd)
{
int ci = var2idx.get (var);
if (doAdd && ci == -1) {
ci = addVar (var);
}
return ci;
}
private int addVar (Variable var)
{
int ci = vars.size ();
vars.add (var);
var2idx.put (var, ci);
// expand all rows
for (int i = 0; i < numRows (); i++) {
Object[] oldVal = (Object[]) values.get (i);
Object[] newVal = new Object[ci+1];
System.arraycopy (oldVal, 0, newVal, 0, ci);
values.set (i, newVal);
}
return ci;
}
public void setRow (int ridx, Assignment other)
{
checkAssignmentsMatch (other);
Object[] row = (Object[]) other.values.get (ridx);
values.set (ridx, row.clone());
}
public void setRow (int ridx, int[] vals)
{
values.set (ridx, boxArray (vals));
}
protected Factor extractMaxInternal (VarSet varSet)
{
return asTable ().extractMax (varSet);
}
protected double lookupValueInternal (int assnIdx)
{
int val = 0;
for (int ri = 0; ri < numRows (); ri++) {
if (singleIndex (ri) == assnIdx) {
val++;
}
}
return val * scale;
}
protected Factor marginalizeInternal (VarSet varsToKeep)
{
Assignment ret = new Assignment ();
Variable[] vars = varsToKeep.toVariableArray ();
for (int ri = 0; ri < this.numRows (); ri++) {
Object[] row = new Object [vars.length];
for (int vi = 0; vi < varsToKeep.size (); vi++) {
Variable var = varsToKeep.get (vi);
row[vi] = this.getObject (ri, var);
}
ret.addRow (vars, row);
}
ret.scale = scale;
return ret;
}
public boolean almostEquals (Factor p, double epsilon)
{
return asTable ().almostEquals (p, epsilon);
}
public boolean isNaN ()
{
return false;
}
public Factor normalize ()
{
scale = 1.0 / numRows ();
return this;
}
public Assignment sample (Randoms r)
{
int ri = r.nextInt (numRows ());
Object[] vals = (Object[]) values.get (ri);
Assignment assn = new Assignment ();
Variable[] varr = (Variable[]) vars.toArray (new Variable [numVariables ()]);
assn.addRow (varr, vals);
return assn;
}
public String dumpToString ()
{
StringWriter writer = new StringWriter ();
dump (new PrintWriter (writer));
return writer.toString ();
}
// todo: think about the semantics of this
public Factor slice (Assignment assn)
{
throw new UnsupportedOperationException ();
}
public AbstractTableFactor asTable ()
{
Variable[] varr = (Variable[]) vars.toArray (new Variable [0]);
int[] idxs = new int[numRows ()];
double[] vals = new double[numRows ()];
for (int ri = 0; ri < numRows (); ri++) {
idxs[ri] = singleIndex (ri);
vals[ri]++;
}
SparseMatrixn matrix = new SparseMatrixn (Utils.toSizesArray (varr), idxs, vals);
return new TableFactor (varr, matrix);
}
/** Returns a list of single-row assignments, one for each row in this assignment. */
public List asList ()
{
List lst = new ArrayList (numRows ());
for (int ri = 0; ri < numRows (); ri++) {
lst.add (getRow (ri));
}
return lst;
}
public Assignment subAssn (int start, int end)
{
Assignment other = new Assignment ();
for (int ri = start; ri < end; ri++) {
other.addRow (getRow (ri));
}
return other;
}
public int[] getColumnInt (Variable x1)
{
int[] ret = new int [numRows ()];
for (int ri = 0; ri < ret.length; ri++) {
ret[ri] = get (ri, x1);
}
return ret;
}
private static final long serialVersionUID = 1;
private static final int SERIAL_VERSION = 2;
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
// in.defaultReadObject ();
int version = in.readInt (); // version
int numVariables = in.readInt ();
var2idx = new TObjectIntHashMap (numVariables);
for (int vi = 0; vi < numVariables; vi++) {
Variable var = (Variable) in.readObject ();
var2idx.put (var, vi);
}
int numRows = in.readInt ();
values = new ArrayList (numRows);
for (int ri = 0; ri < numRows; ri++) {
Object[] row = (Object[]) in.readObject ();
values.add (row);
}
scale = (version >= 2) ? in.readDouble () : 1.0;
}
private void writeObject (ObjectOutputStream out) throws IOException
{
// out.defaultWriteObject ();
out.writeInt (SERIAL_VERSION);
out.writeInt (numVariables ());
for (int vi = 0; vi < numVariables (); vi++) {
out.writeObject (getVariable (vi));
}
out.writeInt (numRows ());
for (int ri = 0; ri < numRows (); ri++) {
Object[] row = (Object[]) values.get (ri);
out.writeObject (row);
}
out.writeDouble (scale);
}
}
| 19,172 | 26.042313 | 147 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/VarSet.java | package cc.mallet.grmm.types;
import java.util.Set;
/**
* Interface for classes that maintain a set of variables in a specified order.
*
* @author Charles Sutton
* @version $Id: VarSet.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public interface VarSet extends Set, Cloneable {
/**
* Returns the variable in this clique at index idx.
* @param idx
* @return the variable
*/
Variable get (int idx);
/**
* Returns the variables in this clique as an array, that should
* not be modified.
* @return An array of Variables.
*/
Variable[] toVariableArray ();
/**
* Returns the number of assignments of this clique.
*/
int weight ();
/**
* Returns an iterator over the assignments to this clique.
* Each element in the Iterator is an {@link cc.mallet.grmm.types.Assignment} object.
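* <p>
* A typical iteration pattern (sketch):
* <pre>
for (AssignmentIterator it = varSet.assignmentIterator (); it.hasNext (); it.advance ()) {
Assignment assn = it.assignment ();
// use assn
}
</pre>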
* @return An iterator over assignments
*/
AssignmentIterator assignmentIterator ();
/**
* Returns the intersection of two cliques.
*/
VarSet intersection (VarSet c);
}
| 1,033 | 20.102041 | 88 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/UniformFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import cc.mallet.util.Randoms;
/**
* $Id: UniformFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class UniformFactor extends AbstractFactor {
private Variable var;
private double min;
private double max;
private double val;
public UniformFactor (Variable var, double min, double max)
{
this (var, min, max, 1.0 / (max - min));
}
public UniformFactor (Variable var, double min, double max, double val)
{
super (new HashVarSet (new Variable[] { var }));
if (!var.isContinuous ()) throw new IllegalArgumentException ();
if (min >= max) throw new IllegalArgumentException ();
this.var = var;
this.min = min;
this.max = max;
this.val = val;
}
//
protected Factor extractMaxInternal (VarSet varSet)
{
throw new UnsupportedOperationException ();
}
public double value (Assignment assn)
{
double x = assn.getDouble (var);
if ((min < x) && (x < max)) {
return val;
} else {
return 0;
}
}
protected double lookupValueInternal (int i)
{
throw new UnsupportedOperationException ();
}
protected Factor marginalizeInternal (VarSet varsToKeep)
{
if (varsToKeep.contains (var)) {
return duplicate ();
} else {
return new ConstantFactor (val * (max - min));
}
}
public Factor normalize ()
{
val = 1.0 / (max - min);
return this;
}
public Assignment sample (Randoms r)
{
double val = r.nextUniform (min, max);
return new Assignment (var, val);
}
public boolean almostEquals (Factor p, double epsilon)
{
return equals (p);
}
public Factor duplicate ()
{
return new UniformFactor (var, min, max);
}
public boolean isNaN ()
{
return Double.isNaN (min) || Double.isNaN (max);
}
public String dumpToString ()
{
return toString ();
}
public void multiplyBy (Factor other)
{
if (other instanceof ConstantFactor) {
val *= other.value (new Assignment ());
} else {
throw new UnsupportedOperationException ("Can't multiply uniform factor by "+other);
}
}
public void divideBy (Factor other)
{
if (other instanceof ConstantFactor) {
val /= other.value (new Assignment ());
} else {
throw new UnsupportedOperationException ("Can't divide uniform factor by "+other);
}
}
public String toString ()
{
return "[UniformFactor "+var+" "+min+" ... " +max+" ]";
}
public Factor slice (Assignment assn)
{
if (assn.containsVar (var)) {
return new ConstantFactor (value (assn));
} else return duplicate ();
}
}
| 3,054 | 22.143939 | 90 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/TableFactor.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import cc.mallet.grmm.util.Flops;
import cc.mallet.types.Matrix;
import cc.mallet.types.Matrixn;
import cc.mallet.util.Maths;
/**
* Created: Jan 4, 2006
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: TableFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class TableFactor extends AbstractTableFactor {
public static DiscreteFactor multiplyAll (Factor[] phis)
{
return multiplyAll (Arrays.asList (phis));
}
/**
* Returns the product of a collection of multinomial potentials.
*/
/// xxx once there are other types of potentials, this will need to
/// be refactored into a Factors static-utilities class.
public static AbstractTableFactor multiplyAll (Collection phis)
{
if (phis.size() == 1) {
Factor first = (Factor) phis.iterator ().next ();
return (AbstractTableFactor) first.duplicate ();
}
/* Get all the variables */
VarSet vs = new HashVarSet ();
for (Iterator it = phis.iterator (); it.hasNext ();) {
Factor phi = (Factor) it.next ();
vs.addAll (phi.varSet ());
}
/* define a new potential over the neighbors of NODE */
TableFactor newCPF = new TableFactor (vs);
for (Iterator it = phis.iterator (); it.hasNext ();) {
Factor phi = (Factor) it.next ();
newCPF.multiplyBy (phi);
}
return newCPF;
}
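  // Illustrative sketch added for this document; it is not part of the original MALLET
  // source.  It shows one way multiplyAll might be called: the product of a pairwise
  // factor and a unary factor is a table over the union of their variables.  The
  // Variable(int) constructor (a discrete variable with that many outcomes) is an
  // assumption; the two TableFactor constructors used are defined below in this class.
  static DiscreteFactor exampleMultiplyAllSketch ()
  {
    Variable v1 = new Variable (2);
    Variable v2 = new Variable (2);
    Factor pairwise = new TableFactor (new Variable[] { v1, v2 },
                                       new double[] { 0.1, 0.2, 0.3, 0.4 });
    Factor unary = new TableFactor (v2, new double[] { 0.5, 0.5 });
    return multiplyAll (new Factor[] { pairwise, unary });
  }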
public TableFactor (Variable var)
{
super (var);
}
public TableFactor (Variable var, double[] values)
{
super (var, values);
}
public TableFactor ()
{
}
public TableFactor (BidirectionalIntObjectMap varMap)
{
super (varMap);
}
public TableFactor (Variable allVars [])
{
super (allVars);
}
public TableFactor (Collection allVars)
{
super (allVars);
}
public TableFactor (Variable[] allVars, double[] probs)
{
super (allVars, probs);
}
public TableFactor (VarSet allVars, double[] probs)
{
super (allVars, probs);
}
public TableFactor (Variable[] allVars, Matrix probsIn)
{
super (allVars, probsIn);
}
public TableFactor (AbstractTableFactor in)
{
super (in);
probs = (Matrix) in.getValueMatrix ().cloneMatrix ();
}
public TableFactor (VarSet allVars, Matrix probsIn)
{
super (allVars, probsIn);
}
public TableFactor (AbstractTableFactor ptl, double[] probs)
{
super (ptl, probs);
}
/**
* **********************************************************************
*/
void setAsIdentity ()
{
setAll (1.0);
}
public Factor duplicate ()
{
return new TableFactor (this);
}
protected AbstractTableFactor createBlankSubset (Variable[] vars)
{
return new TableFactor (vars);
}
/**
* Multiplies every entry in the potential by a constant
* such that all the entries sum to 1.
*/
public Factor normalize ()
{
Flops.increment (2 * probs.numLocations ());
probs.oneNormalize ();
return this;
}
public double sum ()
{
Flops.increment (probs.numLocations ());
return probs.oneNorm ();
}
public double logValue (AssignmentIterator it)
{
Flops.log ();
return Math.log (rawValue (it.indexOfCurrentAssn ()));
}
public double logValue (Assignment assn)
{
Flops.log ();
return Math.log (rawValue (assn));
}
public double logValue (int loc)
{
Flops.log ();
return Math.log (rawValue (loc));
}
public double value (Assignment assn)
{
return rawValue (assn);
}
public double value (int loc)
{
return rawValue (loc);
}
public double value (AssignmentIterator assn)
{
return rawValue (assn.indexOfCurrentAssn ());
}
protected Factor marginalizeInternal (AbstractTableFactor result)
{
result.setAll (0.0);
int[] projection = largeIdxToSmall (result);
/* Add each element of the single array of the large potential
to the correct element in the small potential. */
int numLocs = probs.numLocations ();
for (int largeLoc = 0; largeLoc < numLocs; largeLoc++) {
/* Convert a single-index from this distribution to
one for the smaller distribution */
int smallIdx = projection[largeLoc];
/* Whew! Now, add it in. */
double oldValue = this.probs.valueAtLocation (largeLoc);
result.probs.incrementSingleValue (smallIdx, oldValue);
}
Flops.increment (numLocs);
return result;
}
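  // A worked micro-example of the projection above (added for illustration; not in the
  // original source): if this factor ranges over binary variables {v1, v2} and result
  // ranges over {v1} only, then -- assuming v1 varies slowest in the single-index
  // layout -- largeIdxToSmall yields projection = {0, 0, 1, 1}.  Locations (v1=0,v2=0)
  // and (v1=0,v2=1) are accumulated into the entry for v1=0, the other two into v1=1.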
// Does destructive multiplication on this, assuming this has all
// the variables in pot.
protected void multiplyByInternal (DiscreteFactor ptl)
{
int[] projection = largeIdxToSmall (ptl);
int numLocs = probs.numLocations ();
for (int singleLoc = 0; singleLoc < numLocs; singleLoc++) {
int smallIdx = projection[singleLoc];
double prev = this.probs.valueAtLocation (singleLoc);
double newVal = ptl.value (smallIdx);
this.probs.setValueAtLocation (singleLoc, prev * newVal);
}
Flops.increment (numLocs);
}
// Does destructive divison on this, assuming this has all
// the variables in pot.
protected void divideByInternal (DiscreteFactor ptl)
{
int[] projection = largeIdxToSmall (ptl);
int numLocs = probs.numLocations ();
for (int singleLoc = 0; singleLoc < numLocs; singleLoc++) {
int smallIdx = projection[singleLoc];
double prev = this.probs.valueAtLocation (singleLoc);
double newVal = ptl.value (smallIdx);
double product = prev / newVal;
/* by convention, let dividing by zero just return 0 */
if (Maths.almostEquals (newVal, 0)) {
product = 0;
}
this.probs.setValueAtLocation (singleLoc, product);
}
Flops.increment (numLocs);
}
// Does destructive addition on this, assuming this has all
// the variables in pot.
protected void plusEqualsInternal (DiscreteFactor ptl)
{
int[] projection = largeIdxToSmall (ptl);
int numLocs = probs.numLocations ();
for (int singleLoc = 0; singleLoc < numLocs; singleLoc++) {
int smallIdx = projection[singleLoc];
double prev = this.probs.valueAtLocation (singleLoc);
double newVal = ptl.value (smallIdx);
this.probs.setValueAtLocation (singleLoc, prev + newVal);
}
Flops.increment (numLocs);
}
protected double rawValue (Assignment assn)
{
int numVars = getNumVars ();
int[] indices = new int[numVars];
for (int i = 0; i < numVars; i++) {
Variable var = getVariable (i);
indices[i] = assn.get (var);
}
double value = rawValue (indices);
return value;
}
private double rawValue (int[] indices)
{
    // handle non-occurring indices specially: locations missing from the sparse matrix default to 0 (equivalently -Inf in log space).
int singleIdx = probs.singleIndex (indices);
return rawValue (singleIdx);
}
protected double rawValue (int singleIdx)
{
int loc = probs.location (singleIdx);
if (loc < 0) {
return 0;
} else {
return probs.valueAtLocation (loc);
}
}
public void exponentiate (double power)
{
for (int loc = 0; loc < probs.numLocations (); loc++) {
double oldVal = probs.valueAtLocation (loc);
double newVal = Math.pow (oldVal, power);
probs.setValueAtLocation (loc, newVal);
}
Flops.pow (probs.numLocations ());
}
/*
protected AbstractTableFactor ensureOperandCompatible (AbstractTableFactor ptl)
{
if (!(ptl instanceof TableFactor)) {
return new TableFactor (ptl);
} else {
return ptl;
}
}
*/
public void setLogValue (Assignment assn, double logValue)
{
Flops.exp ();
setRawValue (assn, Math.exp (logValue));
}
public void setLogValue (AssignmentIterator assnIt, double logValue)
{
Flops.exp ();
setRawValue (assnIt, Math.exp (logValue));
}
public void setValue (AssignmentIterator assnIt, double value)
{
setRawValue (assnIt, value);
}
public void setLogValues (double[] vals)
{
Flops.exp (vals.length);
for (int i = 0; i < vals.length; i++) {
setRawValue (i, Math.exp (vals[i]));
}
}
public void setValues (double[] vals)
{
for (int i = 0; i < vals.length; i++) {
setRawValue (i, vals[i]);
}
}
public void timesEquals (double v)
{
Flops.increment (probs.numLocations ());
probs.timesEquals (v);
}
protected void plusEqualsAtLocation (int loc, double v)
{
Flops.increment (1);
double oldVal = valueAtLocation (loc);
setRawValue (loc, oldVal + v);
}
public Matrix getValueMatrix ()
{
return probs;
}
public Matrix getLogValueMatrix ()
{
Flops.log (probs.numLocations ());
Matrix logProbs = (Matrix) probs.cloneMatrix ();
for (int loc = 0; loc < probs.numLocations (); loc++) {
logProbs.setValueAtLocation (loc, Math.log (logProbs.valueAtLocation (loc)));
}
return logProbs;
}
public double valueAtLocation (int idx)
{
return probs.valueAtLocation (idx);
}
/**
* Creates a new potential from another by restricting it to a given assignment.
*
* @param var Variable the new potential will be over
* @param observed Evidence to restrict to. Must give values for all variables in ptl.varSet() except for var.
* @return A DiscretePotential over var
*/
protected Factor slice_onevar (Variable var, Assignment observed)
{
double[] vals = new double [var.getNumOutcomes ()];
for (int i = 0; i < var.getNumOutcomes (); i++) {
Assignment toAssn = new Assignment (var, i);
Assignment union = Assignment.union (toAssn, observed);
vals[i] = value (union);
}
return new TableFactor (var, vals);
}
protected Factor slice_twovar (Variable v1, Variable v2, Assignment observed)
{
int N1 = v1.getNumOutcomes ();
int N2 = v2.getNumOutcomes ();
int[] szs = new int[]{N1, N2};
Variable[] varr = new Variable[] { v1, v2 };
int[] outcomes = new int[2];
double[] vals = new double [N1 * N2];
for (int i = 0; i < N1; i++) {
outcomes[0] = i;
for (int j = 0; j < N2; j++) {
outcomes[1] = j;
Assignment toVars = new Assignment (varr, outcomes);
Assignment assn = Assignment.union (toVars, observed);
int idx = Matrixn.singleIndex (szs, new int[]{i, j}); // Inefficient, but much less error prone
vals[idx] = value (assn);
}
}
return new TableFactor (new Variable[]{v1, v2}, vals);
}
protected Factor slice_general (Variable[] vars, Assignment observed)
{
VarSet toKeep = new HashVarSet (vars);
toKeep.removeAll (observed.varSet ());
double[] vals = new double [toKeep.weight ()];
AssignmentIterator it = toKeep.assignmentIterator ();
while (it.hasNext ()) {
Assignment union = Assignment.union (observed, it.assignment ());
vals[it.indexOfCurrentAssn ()] = value (union);
it.advance ();
}
return new TableFactor (toKeep, vals);
}
public static TableFactor makeFromLogValues (VarSet domain, double[] vals)
{
double[] vals2 = new double [vals.length];
for (int i = 0; i < vals.length; i++) {
vals2[i] = Math.exp (vals[i]);
}
return new TableFactor (domain, vals2);
}
public AbstractTableFactor recenter ()
{
int loc = argmax ();
double val = valueAtLocation (loc);
timesEquals (1.0 / val);
return this;
}
}
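// Usage sketch added for illustration; it is not part of the original MALLET source.
// It exercises the public operations defined above -- normalize, marginalize, slice, and
// value lookup -- on a small two-variable table.  Assumptions: the Variable(int)
// constructor, slice(Assignment) as the public entry point for the slice_* helpers, and
// a values-array layout in which the first variable varies slowest (the commented
// numbers below hold only under that ordering).
class TableFactorUsageSketch {
  public static void main (String[] args)
  {
    Variable v1 = new Variable (2);
    Variable v2 = new Variable (2);
    TableFactor joint = new TableFactor (new Variable[] { v1, v2 },
                                         new double[] { 1.0, 2.0, 3.0, 4.0 });
    joint.normalize ();                                              // entries now sum to 1
    Factor marginal = joint.marginalize (v1);                        // sums v2 out
    System.out.println (marginal.value (new Assignment (v1, 0)));    // 0.3 = (1 + 2) / 10
    Factor restricted = joint.slice (new Assignment (v2, 1));        // table over v1 with v2 fixed
    System.out.println (restricted.value (new Assignment (v1, 1)));  // 0.4
  }
}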
| 11,829 | 24.550756 | 113 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/AssignmentIterator.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import java.util.Iterator;
/**
* Iterates over the assignments to a set of variables.
* This is never instantiated by user code; instead, use
* one of the many assignmentIterator() methods.
*
* DOCTODO: Add note about difference between using this class and iterating
* over assignments.
* DOCTODO: Explain why advance() is useful instead of next.
*
* Created: Sun Nov 9 21:04:03 2003
*
* @author <a href="mailto:[email protected]">Charles Sutton</a>
* @version $Id: AssignmentIterator.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
 */
public interface AssignmentIterator extends Iterator {
void advance();
int indexOfCurrentAssn ();
Assignment assignment ();
}
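// Usage sketch added for illustration; it is not part of the original MALLET source.
// It walks the assignments of a small clique using advance() together with
// indexOfCurrentAssn(), the pattern used throughout this package (rather than relying
// on next()).  The Variable(int) constructor is an assumption; HashVarSet and
// Assignment come from this package.
class AssignmentIteratorUsageSketch {
  public static void main (String[] args)
  {
    VarSet clique = new HashVarSet (new Variable[] { new Variable (2), new Variable (2) });
    AssignmentIterator it = clique.assignmentIterator ();
    while (it.hasNext ()) {
      System.out.println ("assignment #" + it.indexOfCurrentAssn () + " = " + it.assignment ());
      it.advance ();
    }
  }
}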
| 1,145 | 34.8125 | 81 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/AbstractFactor.java | /* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import java.util.Collection;
import cc.mallet.util.Randoms;
/**
* An Abstract class from which new Factor classes can be subclassed.
*
* Created: Sep 12, 2005
*
 * @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: AbstractFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public abstract class AbstractFactor implements Factor {
protected VarSet vars;
protected AbstractFactor ()
{
vars = new HashVarSet ();
}
protected AbstractFactor (VarSet vars)
{
this.vars = vars;
}
protected abstract Factor extractMaxInternal (VarSet varSet);
protected abstract double lookupValueInternal (int i);
protected abstract Factor marginalizeInternal (VarSet varsToKeep);
public double value (Assignment assn)
{
return lookupValueInternal (assn.singleIndex ());
}
public double value (AssignmentIterator it)
{
return lookupValueInternal (it.indexOfCurrentAssn ());
}
public double phi (DenseAssignmentIterator it)
{
return lookupValueInternal (it.indexOfCurrentAssn ());
}
public Factor marginalize (Variable vars[])
{
return marginalizeInternal (new HashVarSet (vars));
}
public Factor marginalize (Collection vars)
{
return marginalizeInternal (new HashVarSet (vars));
}
public Factor marginalize (Variable var)
{
return marginalizeInternal (new HashVarSet (new Variable[] { var }));
}
public Factor marginalizeOut (Variable var)
{
HashVarSet vars = new HashVarSet (this.vars);
vars.remove (var);
return marginalizeInternal (vars);
}
public Factor marginalizeOut (VarSet varset)
{
HashVarSet vars = new HashVarSet (this.vars);
vars.remove (varset);
return marginalizeInternal (vars);
}
public Factor extractMax (Variable vars[])
{
return extractMaxInternal (new HashVarSet (vars));
}
public Factor extractMax (Collection vars)
{
return extractMaxInternal (new HashVarSet (vars));
}
public Factor extractMax (Variable var)
{
return extractMaxInternal (new HashVarSet (new Variable[] { var }));
}
// xxx should return an Assignment
public int argmax ()
{
throw new UnsupportedOperationException (toString());
}
public Assignment sample (Randoms r)
{
throw new UnsupportedOperationException (toString());
}
public double sum ()
{
throw new UnsupportedOperationException (toString());
}
public double entropy ()
{
throw new UnsupportedOperationException (toString());
}
public Factor multiply (Factor dist)
{
Factor dup = duplicate ();
dup.multiplyBy (dist);
return dup;
}
public void multiplyBy (Factor pot)
{
throw new UnsupportedOperationException ("Cannot multiply "+this+" by "+pot);
}
public void exponentiate (double power)
{
throw new UnsupportedOperationException ("Cannot exponentiate "+this+" by "+power);
}
public void divideBy (Factor pot)
{
throw new UnsupportedOperationException ("Cannot divide "+this+ " by "+pot);
}
public boolean isInLogSpace ()
{
return false;
}
public void logify ()
{
throw new UnsupportedOperationException (toString());
}
public void delogify ()
{
throw new UnsupportedOperationException (toString());
}
public Factor log ()
{
throw new UnsupportedOperationException (toString());
}
public boolean containsVar (Variable var)
{
return vars.contains (var);
}
public VarSet varSet ()
{
return vars;
}
public AssignmentIterator assignmentIterator ()
{
throw new UnsupportedOperationException (toString());
}
public boolean almostEquals (Factor p)
{
return almostEquals (p, 1e-5);
}
public double logValue (Assignment assn)
{
return Math.log (value (assn));
}
public double logValue (AssignmentIterator it)
{
return Math.log (value (it));
}
public double logValue (int loc)
{
throw new UnsupportedOperationException (toString());
}
public Variable getVariable (int i)
{
return vars.get (i);
}
public AbstractTableFactor asTable ()
{
throw new UnsupportedOperationException (toString());
}
protected void setVarSet (VarSet vars) { this.vars = vars; }
}
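// Usage sketch added for illustration; it is not part of the original MALLET source.
// AbstractFactor supplies the marginalize / marginalizeOut / extractMax wrappers and
// leaves only the *Internal methods to subclasses.  UniformFactor, defined elsewhere
// in this package, is one such subclass and is used here to show the wrappers in
// action.  Variable.CONTINUOUS is assumed to create a continuous variable.
class AbstractFactorUsageSketch {
  public static void main (String[] args)
  {
    Variable x = new Variable (Variable.CONTINUOUS);
    Factor uniform = new UniformFactor (x, 0.0, 4.0);
    System.out.println (uniform.varSet () + " contains x: " + uniform.containsVar (x));
    System.out.println (uniform.marginalize (x));     // keeps x: returns a copy of the factor
    System.out.println (uniform.marginalizeOut (x));  // integrates x out: a constant factor
  }
}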
| 4,688 | 20.911215 | 87 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/UniNormalFactor.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import cc.mallet.util.Maths;
import cc.mallet.util.Randoms;
/**
* Univariate Gaussian factor.
* $Id: UniNormalFactor.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class UniNormalFactor extends AbstractFactor {
private Variable var;
private double mean;
private double variance;
public UniNormalFactor (Variable var, double mean, double variance)
{
super (new HashVarSet (new Variable[] { var }));
if (!var.isContinuous ()) throw new IllegalArgumentException ();
if (variance <= 0) throw new IllegalArgumentException ();
this.var = var;
this.mean = mean;
this.variance = variance;
}
//
protected Factor extractMaxInternal (VarSet varSet)
{
throw new UnsupportedOperationException ();
}
public double value (Assignment assn)
{
double x = assn.getDouble (var);
return 1/Math.sqrt(2*Math.PI*variance) * Math.exp (-1/(2.0 * variance) * (x - mean)*(x-mean));
}
protected double lookupValueInternal (int i)
{
throw new UnsupportedOperationException ();
}
protected Factor marginalizeInternal (VarSet varsToKeep)
{
if (varsToKeep.contains (var)) {
return duplicate ();
} else {
return new ConstantFactor (1.0);
}
}
public Factor normalize ()
{
return this;
}
public Assignment sample (Randoms r)
{
double val = r.nextGaussian (mean, variance);
return new Assignment (var, val);
}
public boolean almostEquals (Factor p, double epsilon)
{
return equals (p);
}
public Factor duplicate ()
{
return new UniNormalFactor (var, mean, variance);
}
public boolean isNaN ()
{
return Double.isNaN (mean) || Double.isNaN (variance);
}
public String dumpToString ()
{
return toString ();
}
public String toString ()
{
return "[NormalFactor "+var+" "+mean+" ... " +variance+" ]";
}
public Factor slice (Assignment assn)
{
if (assn.containsVar (var)) {
return new ConstantFactor (value (assn));
} else return duplicate ();
}
public void multiplyBy (Factor f)
{
if (f instanceof ConstantFactor) {
double val = f.value (new Assignment());
// NormalFactor must be normalized right now...
if (Maths.almostEquals (val, 1.0)) {
return; // ok, it's an identity factor
}
}
throw new UnsupportedOperationException ("Can't multiply NormalFactor by "+f);
}
public void divideBy (Factor f)
{
if (f instanceof ConstantFactor) {
double val = f.value (new Assignment());
// NormalFactor must be normalized right now...
if (Maths.almostEquals (val, 1.0)) {
return; // ok, it's an identity factor
}
}
throw new UnsupportedOperationException ("Can't divide NormalFactor by "+f);
}
}
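// Usage sketch added for illustration; it is not part of the original MALLET source.
// It evaluates and samples a univariate Gaussian factor.  Assumptions: Variable.CONTINUOUS
// creates a continuous variable, and Randoms has a no-argument constructor.  Note that the
// third constructor argument is a variance, not a standard deviation.
class UniNormalFactorUsageSketch {
  public static void main (String[] args)
  {
    Variable x = new Variable (Variable.CONTINUOUS);
    UniNormalFactor gaussian = new UniNormalFactor (x, 0.0, 1.0);   // mean 0, variance 1
    System.out.println (gaussian.value (new Assignment (x, 0.0)));  // about 0.3989, the N(0,1) mode
    Assignment draw = gaussian.sample (new Randoms ());
    System.out.println ("sampled x = " + draw.getDouble (x));
  }
}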
| 3,222 | 23.792308 | 98 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/Universe.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import gnu.trove.TIntArrayList;
import gnu.trove.TIntObjectHashMap;
import gnu.trove.THashMap;
import java.io.Serializable;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.util.List;
import java.util.ArrayList;
/**
* A global mapping between variables and indices.
* All variables belong to exactly one universe.
* $Id: Universe.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class Universe implements Serializable {
private BidirectionalIntObjectMap variableAlphabet;
public static final Universe DEFAULT = new Universe ();
public Universe ()
{
variableAlphabet = new BidirectionalIntObjectMap ();
}
public int add (Variable var)
{
return variableAlphabet.lookupIndex (var, true);
}
public Variable get (int idx)
{
return (Variable) variableAlphabet.lookupObject (idx);
}
public int getIndex (Variable var)
{
return variableAlphabet.lookupIndex (var);
}
public int size ()
{
return variableAlphabet.size ();
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException
{
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (variableAlphabet.toArray ());
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
int version = in.readInt ();
Object[] vars = (Object[]) in.readObject ();
variableAlphabet = new BidirectionalIntObjectMap (vars.length);
for (int vi = 0; vi < vars.length; vi++) {
add ((Variable) vars[vi]);
}
}
// maintaining global projection caches
static THashMap allProjectionCaches = new THashMap ();
public TIntObjectHashMap lookupProjectionCache (VarSet varSet)
{
List sizes = new ArrayList (varSet.size ());
for (int vi = 0; vi < varSet.size (); vi++) {
sizes.add (varSet.get(vi).getNumOutcomes ());
}
TIntObjectHashMap result = (TIntObjectHashMap) allProjectionCaches.get (sizes);
if (result == null) {
result = new TIntObjectHashMap ();
allProjectionCaches.put (sizes, result);
}
return result;
}
}
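// Usage sketch added for illustration; it is not part of the original MALLET source.
// It shows the round trip between a variable and its global index.  The Variable(int)
// constructor is an assumption, and new variables are assumed to register themselves
// with Universe.DEFAULT; the explicit add() below is harmless either way, since an
// index is assigned at most once per variable.
class UniverseUsageSketch {
  public static void main (String[] args)
  {
    Universe universe = Universe.DEFAULT;
    Variable v = new Variable (2);
    int idx = universe.add (v);                         // returns the existing index if already present
    System.out.println (universe.getIndex (v) == idx);  // true
    System.out.println (universe.get (idx) == v);       // true
    System.out.println ("universe size = " + universe.size ());
  }
}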
| 2,735 | 26.636364 | 91 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/BidirectionalIntObjectMap.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://mallet.cs.umass.edu/
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import java.io.*;
import java.util.ArrayList;
import java.util.Iterator;
/**
* A mapping between integers and objects where the mapping in each
* direction is efficient. Integers are assigned consecutively, starting
 * at zero, as objects are added to the map. Objects cannot be
* deleted from the map and thus the integers are never reused.
* <p/>
* This class is exactly like Alphabet in MALLET, except that it does
* not do the Serialization magic to ensure that two alphabets that
 * are deserialized from the same file are ==. This avoids memory leaks,
* because MALLET Alphabets are retained in memory forever.
*/
public class BidirectionalIntObjectMap implements Serializable {
gnu.trove.TObjectIntHashMap map;
ArrayList entries;
boolean growthStopped = false;
public BidirectionalIntObjectMap (int capacity)
{
this.map = new gnu.trove.TObjectIntHashMap (capacity);
this.entries = new ArrayList (capacity);
}
public BidirectionalIntObjectMap ()
{
this (8);
}
public BidirectionalIntObjectMap (BidirectionalIntObjectMap other)
{
map = (gnu.trove.TObjectIntHashMap) other.map.clone ();
entries = (ArrayList) other.entries.clone ();
growthStopped = other.growthStopped;
}
/**
* Return -1 if entry isn't present.
*/
public int lookupIndex (Object entry, boolean addIfNotPresent)
{
if (entry == null) {
throw new IllegalArgumentException ("Can't lookup \"null\" in an Alphabet.");
}
int retIndex = -1;
if (map.containsKey (entry)) {
retIndex = map.get (entry);
} else if (!growthStopped && addIfNotPresent) {
retIndex = entries.size ();
map.put (entry, retIndex);
entries.add (entry);
}
return retIndex;
}
public int lookupIndex (Object entry)
{
return lookupIndex (entry, true);
}
public Object lookupObject (int index)
{
return entries.get (index);
}
public Object[] toArray ()
{
return entries.toArray ();
}
/**
* Returns an array containing all the entries in the Alphabet.
* The runtime type of the returned array is the runtime type of in.
* If in is large enough to hold everything in the alphabet, then it
   * is used.  The returned array is such that for all entries <tt>obj</tt>,
* <tt>ret[lookupIndex(obj)] = obj</tt> .
*/
public Object[] toArray (Object[] in)
{
return entries.toArray (in);
}
// xxx This should disable the iterator's remove method...
public Iterator iterator ()
{
return entries.iterator ();
}
public Object[] lookupObjects (int[] indices)
{
Object[] ret = new Object[indices.length];
for (int i = 0; i < indices.length; i++)
ret[i] = entries.get (indices[i]);
return ret;
}
/**
   * Returns an array of the objects corresponding to the given indices.
*
* @param indices An array of indices to look up
* @param buf An array to store the returned objects in.
* @return An array of values from this Alphabet. The runtime type of the array is the same as buf
*/
public Object[] lookupObjects (int[] indices, Object[] buf)
{
for (int i = 0; i < indices.length; i++)
buf[i] = entries.get (indices[i]);
return buf;
}
public int[] lookupIndices (Object[] objects, boolean addIfNotPresent)
{
int[] ret = new int[objects.length];
for (int i = 0; i < objects.length; i++)
ret[i] = lookupIndex (objects[i], addIfNotPresent);
return ret;
}
public boolean contains (Object entry)
{
return map.contains (entry);
}
public int size ()
{
return entries.size ();
}
public void stopGrowth ()
{
growthStopped = true;
}
public void startGrowth ()
{
growthStopped = false;
}
public boolean growthStopped ()
{
return growthStopped;
}
/**
* Return String representation of all Alphabet entries, each
* separated by a newline.
*/
public String toString ()
{
StringBuffer sb = new StringBuffer ();
for (int i = 0; i < entries.size (); i++) {
sb.append (entries.get (i).toString ());
sb.append ('\n');
}
return sb.toString ();
}
public void dump () { dump (System.out); }
public void dump (PrintStream out)
{
dump (new PrintWriter (new OutputStreamWriter (out), true));
}
public void dump (PrintWriter out)
{
for (int i = 0; i < entries.size (); i++) {
out.println (i + " => " + entries.get (i));
}
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException
{
out.writeInt (BidirectionalIntObjectMap.CURRENT_SERIAL_VERSION);
out.writeInt (entries.size ());
for (int i = 0; i < entries.size (); i++)
out.writeObject (entries.get (i));
out.writeBoolean (growthStopped);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.readInt (); // int version
int size = in.readInt ();
entries = new ArrayList (size);
map = new gnu.trove.TObjectIntHashMap (size);
for (int i = 0; i < size; i++) {
Object o = in.readObject ();
map.put (o, i);
entries. add (o);
}
growthStopped = in.readBoolean ();
}
}
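// Usage sketch added for illustration; it is not part of the original MALLET source.
// It demonstrates the two-way mapping described above: lookupIndex assigns consecutive
// indices starting at zero, and lookupObject inverts the mapping.  Every call used here
// is defined in the class above.
class BidirectionalIntObjectMapUsageSketch {
  public static void main (String[] args)
  {
    BidirectionalIntObjectMap map = new BidirectionalIntObjectMap ();
    System.out.println (map.lookupIndex ("apple"));           // 0, added on first lookup
    System.out.println (map.lookupIndex ("banana"));          // 1
    System.out.println (map.lookupIndex ("apple"));           // still 0
    System.out.println (map.lookupObject (1));                // banana
    System.out.println (map.lookupIndex ("cherry", false));   // -1, not added because addIfNotPresent is false
    System.out.println (map.size ());                         // 2
  }
}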
| 5,714 | 25.830986 | 101 | java |
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/grmm/types/ListVarSet.java | /* Copyright (C) 2006 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.grmm.types;
import gnu.trove.THashSet;
import gnu.trove.TIntArrayList;
import java.util.AbstractSet;
import java.util.BitSet;
import java.util.Collection;
import java.util.Set;
import java.io.ObjectOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
import cc.mallet.grmm.inference.Utils;
/**
* A clique that uses very little time and memory based on the flyweight
* pattern, in the same way as BitVarSet. This implementation uses an
* ArrayList of indices, and is likely to be more memory-efficient when the
* Universe is very, very large.
*
* @author Charles Sutton
* @version $Id: ListVarSet.java,v 1.1 2007/10/22 21:37:44 mccallum Exp $
*/
public class ListVarSet extends AbstractSet implements VarSet, Serializable {
transient private Universe universe;
transient private TIntArrayList included;
public ListVarSet (Universe universe, Collection included)
{
this.universe = universe;
this.included = new TIntArrayList (included.size ());
java.util.Iterator it = included.iterator();
while (it.hasNext()) {
this.included.add (universe.getIndex ((Variable) it.next ()));
}
this.included.sort ();
}
public ListVarSet (VarSet vsOld)
{
this (vsOld.get(0).getUniverse (), vsOld);
}
public boolean add (Object o)
{
int idx = universe.getIndex ((Variable) o);
if (idx == -1)
throw new UnsupportedOperationException();
included.add (idx);
included.sort ();
return true;
}
public Variable get(int idx)
{
int gidx = included.get (idx);
return universe.get (gidx);
}
public Variable[] toVariableArray()
{
return (Variable[]) toArray (new Variable[0]);
}
// FIXME cache not updated on changes to the clique
private int cachedWeight = -1;
public int weight()
{
if (cachedWeight == -1) {
int weight = 1;
ListVarSet.Iterator it = new ListVarSet.Iterator ();
while (it.hasNext()) {
Variable var = (Variable) it.next();
weight *= var.getNumOutcomes();
}
cachedWeight = weight;
}
return cachedWeight;
}
public AssignmentIterator assignmentIterator()
{
return new DenseAssignmentIterator (this);
}
public int size()
{
return included.size ();
}
public boolean isEmpty()
{
return included.isEmpty();
}
public boolean contains(Object o)
{
return included.contains (universe.getIndex ((Variable) o));
}
private class Iterator implements java.util.Iterator {
int nextIdx;
public Iterator () { nextIdx = 0; }
public boolean hasNext()
{
return (nextIdx < included.size ());
}
public Object next()
{
int thisIdx = nextIdx;
nextIdx++;
return universe.get (included.get (thisIdx));
}
public void remove()
{
throw new UnsupportedOperationException("Removal from BitSetClique not permitted");
}
}
public java.util.Iterator iterator()
{
return new ListVarSet.Iterator ();
}
public boolean equals (Object o)
{
if (this == o) return true;
if (!(o instanceof VarSet)) return false;
VarSet vs = (VarSet) o;
return (vs.size () == size()) && containsAll (vs);
}
public int hashCode ()
{
int result = 39;
for (int vi = 0; vi < size(); vi++) {
result = 59 * result + get(vi).hashCode ();
}
return result;
}
public VarSet intersection (VarSet c)
{
return Utils.defaultIntersection (this, c);
}
public void clear()
{
included.clear();
}
public String toString ()
{
String foo = "(C";
ListVarSet.Iterator it = new ListVarSet.Iterator ();
while (it.hasNext()) {
Variable var = (Variable) it.next();
foo = foo + " " + var;
}
foo = foo + ")";
return foo;
}
// Serialization garbage
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException
{
out.defaultWriteObject ();
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (universe);
out.writeObject (included.toNativeArray ());
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException
{
in.defaultReadObject ();
int version = in.readInt ();
universe = (Universe) in.readObject ();
int[] vals = (int[]) in.readObject ();
included = new TIntArrayList (vals);
}
}
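// Usage sketch added for illustration; it is not part of the original MALLET source.
// It builds a ListVarSet from a Universe and a collection of variables, the construction
// path this class is intended for when the universe is very large.  Assumptions: the
// Variable(int) constructor, and that new variables register themselves with
// Universe.DEFAULT so that getIndex() finds them.
class ListVarSetUsageSketch {
  public static void main (String[] args)
  {
    Variable v1 = new Variable (2);
    Variable v2 = new Variable (3);
    VarSet clique = new ListVarSet (Universe.DEFAULT, java.util.Arrays.asList (v1, v2));
    System.out.println ("size = " + clique.size () + ", weight = " + clique.weight ());  // 2 variables, 6 assignments
    for (java.util.Iterator it = clique.iterator (); it.hasNext ();) {
      System.out.println (it.next ());
    }
  }
}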
| 4,905 | 21.608295 | 91 | java |